diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 66973e06..229c112c 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -33,6 +33,7 @@ jobs: python -m pip install wheel scipy IPython astropy extension-helpers mpmath python -m pip install git+https://github.com/FRBs/ne2001.git#egg=ne2001 python -m pip install git+https://github.com/FRBs/FRB.git#egg=frb + python -m pip install git+https://github.com/FRBs/astropath.git#egg=astropath - name: Test with tox run: | tox -e ${{ matrix.toxenv }} diff --git a/docs/conf.py b/docs/conf.py index 3a9b2bd1..3f2865a2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -5,10 +5,34 @@ import os import sys +from unittest.mock import MagicMock # Add the project root to the path for autodoc sys.path.insert(0, os.path.abspath('..')) +# Inject mocks for GitHub-only packages directly into sys.modules. +# This must be done before Sphinx loads any extensions, because +# sphinx-automodapi's automodsumm handler fires at builder-inited — +# earlier than autodoc_mock_imports takes effect — and will fail with +# "No module named X" if the package is not installed. 
+# +# Packages mocked here: +# astropath: pip install git+https://github.com/FRBs/astropath.git +# frb: pip install git+https://github.com/FRBs/FRB.git +# ne2001: pip install git+https://github.com/FRBs/ne2001.git +_MOCK_MODULES = [ + 'astropath', + 'astropath.priors', + 'astropath.path', + 'frb', + 'frb.frb', + 'frb.dm', + 'frb.associate', + 'ne2001', +] +for _mod in _MOCK_MODULES: + sys.modules[_mod] = MagicMock() + # -- Project information ----------------------------------------------------- project = 'zdm' copyright = '2024, Clancy James and contributors' @@ -107,7 +131,13 @@ } # Mock imports for modules that may not be available during doc build +# These packages can only be installed from GitHub and are not available +# on ReadTheDocs or in the standard docs build environment: +# ne2001: pip install git+https://github.com/FRBs/ne2001.git +# frb: pip install git+https://github.com/FRBs/FRB.git +# astropath: pip install git+https://github.com/FRBs/astropath.git autodoc_mock_imports = [ 'ne2001', 'frb', + 'astropath', ] diff --git a/docs/index.rst b/docs/index.rst index 08c536ad..6bebe91a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,6 +43,7 @@ Getting Started architecture parameters api + optical .. toctree:: :maxdepth: 1 diff --git a/docs/optical.rst b/docs/optical.rst new file mode 100644 index 00000000..808897ae --- /dev/null +++ b/docs/optical.rst @@ -0,0 +1,435 @@ +.. _optical: + +================================ +Optical Host Galaxy Association +================================ + +This section describes the three modules that connect zdm's redshift-DM +predictions to the `PATH `_ (Probabilistic +Association of Transients to their Hosts) algorithm. Together they provide +physically motivated priors on FRB host galaxy apparent magnitudes, derived +from the zdm posterior p(z | DM\ :sub:`EG`). 
+ +- :mod:`zdm.optical_params` — parameter dataclasses configuring each model +- :mod:`zdm.optical` — host magnitude models and the PATH interface wrapper +- :mod:`zdm.optical_numerics` — numerical evaluation, optimisation, and + statistics for fitting the models to CRAFT ICS optical data + +Overview +======== + +Standard PATH assigns host galaxy candidates a prior based only on galaxy +surface density and angular size. The zdm optical modules replace this with a +prior informed by p(z | DM\ :sub:`EG`): given an FRB's extragalactic DM, zdm +predicts a redshift distribution, which is convolved with a host galaxy +luminosity model to produce p(m\ :sub:`r` | DM\ :sub:`EG`). + +The modules are built around a two-layer design: + +.. code-block:: text + + ┌──────────────────────────────────────────────────────────────┐ + │ model_wrapper (optical.py) │ + │ Convolves p(m_r|z) with zdm p(z|DM_EG) → p(m_r|DM_EG) │ + │ Estimates P_U (undetected host prior) │ + │ Plugs into PATH via pathpriors.USR_raw_prior_Oi │ + └──────────────────────────────────────────────────────────────┘ + │ wraps + ┌──────────────────────────────────────────────────────────────┐ + │ Host magnitude models (optical.py) │ + │ │ + │ simple_host_model — parametric p(M_r) histogram │ + │ loudas_model — mass/SFR-weighted tables (Loudas) │ + │ marnoch_model — Gaussian fit to known hosts │ + │ (Marnoch et al. 2023) │ + └──────────────────────────────────────────────────────────────┘ + │ configured by + ┌──────────────────────────────────────────────────────────────┐ + │ Parameter dataclasses (optical_params.py) │ + │ │ + │ OpticalState ← SimpleParams, LoudasParams, │ + │ Apparent, Identification │ + └──────────────────────────────────────────────────────────────┘ + +Host Magnitude Models +===================== + +Three models are available, all implementing the same interface +``get_pmr_gz(mrbins, z)`` which returns p(m\ :sub:`r` | z) for a set of +apparent magnitude bin edges at a given redshift. 
+ +simple_host_model +----------------- + +A parametric model describing the intrinsic absolute magnitude distribution +p(M\ :sub:`r`) as N amplitudes (default 10) at uniformly spaced points +between ``Absmin`` and ``Absmax``. The amplitudes are normalised to sum to +unity and interpolated onto a fine internal grid via one of four schemes +controlled by ``AbsModelID``: + +.. list-table:: + :header-rows: 1 + :widths: 15 85 + + * - ``AbsModelID`` + - Description + * - 0 + - Step-function histogram — each parameter value applies uniformly to + its bin + * - 1 + - Linear interpolation between parameter points *(default)* + * - 2 + - Cubic spline interpolation (negative values clamped to zero) + * - 3 + - Cubic spline in log-space (parameters are log\ :sub:`10` weights) + +Conversion from M\ :sub:`r` to m\ :sub:`r` is controlled by ``AppModelID``: + +.. list-table:: + :header-rows: 1 + :widths: 15 85 + + * - ``AppModelID`` + - Description + * - 0 + - Pure distance modulus, no k-correction *(default)* + * - 1 + - Distance modulus plus power-law k-correction + ``2.5 × k × log10(1 + z)`` + +loudas_model +------------ + +Uses precomputed p(m\ :sub:`r` | z) tables from Nick Loudas, constructed by +weighting galaxy luminosities by either stellar mass or star-formation rate. +The single free parameter ``fSFR`` interpolates between the two: + +.. math:: + + p(m_r | z) = (1 - f_{\rm SFR})\,p_{\rm mass}(m_r | z) + + f_{\rm SFR}\,p_{\rm SFR}(m_r | z) + +Interpolation between tabulated redshift bins is performed in log-z space +with a luminosity-distance shift applied to each tabulated distribution before +combining, ensuring correct apparent magnitude evolution at low redshift. + +marnoch_model +------------- + +A zero-parameter model based on Marnoch et al. 2023 (MNRAS 525, 994). Fits +a Gaussian to the r-band magnitude distribution of known CRAFT ICS FRB host +galaxies, with mean and standard deviation described as cubic splines of +redshift. 
No free parameters; the model is fixed by the observed host sample. + +The ``model_wrapper`` Class +=========================== + +:class:`~zdm.optical.model_wrapper` is a survey-independent wrapper around +any host model. Its key responsibilities are: + +1. **Precomputation**: at initialisation it calls ``model.get_pmr_gz`` for + every redshift value in the zdm grid to build a cached + p(m\ :sub:`r` | z) array. +2. **DM integration**: ``init_path_raw_prior_Oi(DM, grid)`` extracts + p(z | DM\ :sub:`EG`) from the grid and convolves it with the cached + array to produce p(m\ :sub:`r` | DM\ :sub:`EG`). +3. **P_U estimation**: ``estimate_unseen_prior()`` integrates the magnitude + prior against the detection probability curve + (logistic function centred on ``pU_mean`` with width ``pU_width``) to + obtain the prior probability that the true host is below the detection + limit. +4. **PATH interface**: after ``init_path_raw_prior_Oi`` is called, + ``pathpriors.USR_raw_prior_Oi`` is automatically pointed at + ``path_raw_prior_Oi``, so PATH uses the zdm-derived prior transparently. + +Typical Workflow +================ + +The following example shows how to obtain zdm-informed PATH posteriors for a +single CRAFT ICS FRB. + +.. code-block:: python + + from zdm import optical as opt + from zdm import optical_numerics as on + from zdm import loading, cosmology as cos, parameters + + # 1. Initialise zdm grid + state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + ss, gs = loading.surveys_and_grids(survey_names=['CRAFT_ICS_1300']) + g, s = gs[0], ss[0] + + # 2. Choose a host magnitude model + model = opt.marnoch_model() # or simple_host_model / loudas_model + + # 3. Wrap it for the survey's redshift grid + wrapper = opt.model_wrapper(model, g.zvals) + + # 4. For a specific FRB, look up its DM_EG + frb = 'FRB20190608B' + imatch = opt.matchFRB(frb, s) + DMEG = s.DMEGs[imatch] + + # 5. 
Compute p(m_r | DM_EG) and estimate P_U + wrapper.init_path_raw_prior_Oi(DMEG, g) # also sets pathpriors.USR_raw_prior_Oi + PU = wrapper.estimate_unseen_prior() + + # 6. Run PATH with the zdm prior + P_O, P_Ox, P_Ux, mags, ptbl = on.run_path(frb, usemodel=True, P_U=PU) + +To process the full CRAFT ICS sample and compare models, use +:func:`~zdm.optical_numerics.calc_path_priors` directly. To fit model +parameters, pass :func:`~zdm.optical_numerics.function` as the objective to +``scipy.optimize.minimize`` — see ``zdm/scripts/Path/optimise_host_priors.py`` +for a complete example. + +Parameter Reference +=================== + +All host galaxy model parameters are held in dataclasses collected by +:class:`~zdm.optical_params.OpticalState`. The four constituent dataclasses +and their parameters are described below. + +SimpleParams +------------ + +Controls the :class:`~zdm.optical.simple_host_model`. + +.. list-table:: + :header-rows: 1 + :widths: 20 12 12 56 + + * - Parameter + - Default + - Units + - Description + * - ``Absmin`` + - −25 + - M\ :sub:`r` + - Minimum absolute magnitude of the host distribution + * - ``Absmax`` + - −15 + - M\ :sub:`r` + - Maximum absolute magnitude of the host distribution + * - ``NAbsBins`` + - 1000 + - — + - Number of internal absolute magnitude bins (fine grid for + computing p(m\ :sub:`r` | z)) + * - ``NModelBins`` + - 10 + - — + - Number of free parameter bins describing p(M\ :sub:`r`) + * - ``AbsPriorMeth`` + - 0 + - — + - Initial prior on absolute magnitudes: 0 = uniform + * - ``AbsModelID`` + - 1 + - — + - Interpolation scheme for p(M\ :sub:`r`): 0 = histogram, + 1 = linear, 2 = spline, 3 = log-spline + * - ``AppModelID`` + - 0 + - — + - Absolute-to-apparent conversion: 0 = distance modulus only, + 1 = with power-law k-correction + * - ``k`` + - 0.0 + - — + - k-correction power-law index (only used when ``AppModelID=1``) + +LoudasParams +------------ + +Controls the :class:`~zdm.optical.loudas_model`. + +.. 
list-table:: + :header-rows: 1 + :widths: 20 12 12 56 + + * - Parameter + - Default + - Units + - Description + * - ``fSFR`` + - 0.5 + - — + - Fraction of FRB hosts tracing star-formation rate (0 = pure + mass-weighted, 1 = pure SFR-weighted) + * - ``NzBins`` + - 10 + - — + - Number of redshift bins for histogram calculations + * - ``zmin`` + - 0.0 + - — + - Minimum redshift for p(m\ :sub:`r`) calculation + * - ``zmax`` + - 0.0 + - — + - Maximum redshift for p(m\ :sub:`r`) calculation + * - ``NMrBins`` + - 0 + - — + - Number of absolute magnitude bins + * - ``Mrmin`` + - 0.0 + - M\ :sub:`r` + - Minimum absolute magnitude + * - ``Mrmax`` + - 0.0 + - M\ :sub:`r` + - Maximum absolute magnitude + +Apparent +-------- + +Controls the apparent magnitude grid used by :class:`~zdm.optical.model_wrapper`. + +.. list-table:: + :header-rows: 1 + :widths: 20 12 12 56 + + * - Parameter + - Default + - Units + - Description + * - ``Appmin`` + - 10 + - m\ :sub:`r` + - Minimum apparent magnitude of the internal grid + * - ``Appmax`` + - 35 + - m\ :sub:`r` + - Maximum apparent magnitude of the internal grid + * - ``NAppBins`` + - 250 + - — + - Number of apparent magnitude bins + +Identification +-------------- + +Controls the survey detection completeness model used to compute P_U. +The detection probability is modelled as a logistic function: +p(detected | m\ :sub:`r`) = 1 − p(U | m\ :sub:`r`) where + +.. math:: + + p(U | m_r) = \frac{1}{1 + \exp\!\left(\frac{\mu - m_r}{w}\right)} + +with μ = ``pU_mean`` and w = ``pU_width``. + +.. list-table:: + :header-rows: 1 + :widths: 20 12 12 56 + + * - Parameter + - Default + - Units + - Description + * - ``pU_mean`` + - 26.385 + - m\ :sub:`r` + - Magnitude at which 50 % of host galaxies are undetected + (the survey's half-completeness limit). Default value is + calibrated to VLT/FORS2 R-band observations. + * - ``pU_width`` + - 0.279 + - m\ :sub:`r` + - Characteristic width of the completeness rolloff. 
Smaller + values give a sharper transition between detected and + undetected regimes. + +Optimisation and Statistics +============================ + +:mod:`zdm.optical_numerics` provides two goodness-of-fit statistics for +comparing the model-predicted apparent magnitude prior to observed PATH +posteriors across a sample of FRBs: + +**Maximum-likelihood statistic** (:func:`~zdm.optical_numerics.calculate_likelihood_statistic`) + +For each FRB, evaluates + +.. math:: + + \ln \mathcal{L}_i = \log_{10}\!\left(\sum_j \frac{P(O_j|x)}{s_i} + P_{U,i}^{\rm prior}\right) + +where the sum runs over candidate host galaxies and *s*\ :sub:`i` is a +rescale factor that undoes PATH's internal renormalisation. The total +statistic is Σ ln *ℒ*\ :sub:`i` over all FRBs. This is the recommended +objective for parameter fitting. + +**KS-like statistic** (:func:`~zdm.optical_numerics.calculate_ks_statistic`) + +Builds normalised cumulative distributions of the model prior and the +observed posteriors (weighted by P(O|x)) over apparent magnitude, then +returns the maximum absolute difference — analogous to the KS test statistic. +Smaller values indicate a better fit. + +Both statistics accept a ``POxcut`` argument to restrict the sample to FRBs +with a confidently identified host (max P(O|x) > threshold), simulating a +traditional host-identification approach. + +Scripts +======= + +Ready-to-run scripts using these modules are in ``zdm/scripts/Path/``: + +.. list-table:: + :header-rows: 1 + :widths: 40 60 + + * - Script + - Purpose + * - ``estimate_path_priors.py`` + - Demonstrate zdm-informed PATH priors on all CRAFT ICS FRBs; + compare flat vs. 
model priors; save posterior magnitude histogram + * - ``optimise_host_priors.py`` + - Fit host model parameters to the CRAFT ICS sample using + ``scipy.optimize.minimize`` + * - ``plot_host_models.py`` + - Visualise all three host models and compare their PATH posteriors + across the CRAFT ICS sample + +API Reference +============= + +optical_params +-------------- + +Parameter dataclasses for configuring host galaxy models. + +.. automodapi:: zdm.optical_params + :no-inheritance-diagram: + +optical +------- + +Host magnitude model classes and the ``model_wrapper`` PATH interface. + +.. automodapi:: zdm.optical + :no-inheritance-diagram: + +optical_numerics +---------------- + +Numerical evaluation, optimisation, and statistics for fitting host models. + +.. automodapi:: zdm.optical_numerics + :no-inheritance-diagram: + +References +========== + +- Marnoch et al. 2023, MNRAS 525, 994 — + FRB host galaxy r-band magnitude model + (https://doi.org/10.1093/mnras/stad2353) +- Macquart et al. 2020, Nature 581, 391 — + Macquart relation / p(DM | z) +- Aggarwal et al. 
2021, ApJ 911, 95 — + PATH algorithm for probabilistic host association diff --git a/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp1.ecsv b/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp1.ecsv index 19b075d2..69b3b845 100644 --- a/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp1.ecsv +++ b/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp1.ecsv @@ -25,5 +25,5 @@ # "TRES": 13.8, "THRESH": 0.309} }'} # schema: astropy-2.0 TNS BW DM DMG FBAR FRES Gb Gl NREP SNR SNRTHRESH THRESH TRES WIDTH XDec XRA Z -DUMMY 288.0 401.4 35.0 906 1.0 "" "" 1 27.9 9.5 0.309 13.8 4.0 "" "" 0.1 +DUMMY 288.0 401.4 35.0 906 1.0 "" "" 1 27.9 9.5 0.2472 13.8 4.0 "" "" 0.1 diff --git a/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp_all.ecsv b/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp_all.ecsv index d9d94349..09430019 100644 --- a/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp_all.ecsv +++ b/papers/CRACO/ImprovedSurveys/CRAFT_CRACO_900_imp_all.ecsv @@ -25,5 +25,5 @@ # "TRES": 1.728, "THRESH": 0.309} }'} # schema: astropy-2.0 TNS BW DM DMG FBAR FRES Gb Gl NREP SNR SNRTHRESH THRESH TRES WIDTH XDec XRA Z -DUMMY 288.0 401.4 35.0 906 0.167 "" "" 1 27.9 9.5 0.309 1.728 4.0 "" "" 0.1 +DUMMY 288.0 401.4 35.0 906 0.167 "" "" 1 27.9 9.5 0.2472 1.728 4.0 "" "" 0.1 diff --git a/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_alldm_zDM.pdf b/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_alldm_zDM.pdf deleted file mode 100644 index edd5a915..00000000 Binary files a/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_alldm_zDM.pdf and /dev/null differ diff --git a/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_zDM.pdf b/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_zDM.pdf deleted file mode 100644 index edd5a915..00000000 Binary files a/papers/CRACO/TestSurveys/CRAFT_CRACO_900_3ms_zDM.pdf and /dev/null differ diff --git a/papers/CRACO/plot_900_improvements.py b/papers/CRACO/plot_900_improvements.py index fe1edc6d..665b700b 100644 --- a/papers/CRACO/plot_900_improvements.py +++ 
b/papers/CRACO/plot_900_improvements.py @@ -95,7 +95,7 @@ def main(): "1: $t_{\\rm res}=1.7$ ms", "2: $\\nu_{\\rm res} = 167$ kHz", "3: Perfect imaging", - "4: $T_{\\rm sys}=25^{\\circ}$ K"] + "4: $T_{\\rm sys}=20^{\\circ}$ K"] linestyles=["-",":","--","-.","-"] nz=400 diff --git a/papers/CRACO/plot_askap_2030.py b/papers/CRACO/plot_askap_2030.py new file mode 100644 index 00000000..3f7a7e31 --- /dev/null +++ b/papers/CRACO/plot_askap_2030.py @@ -0,0 +1,261 @@ +""" +This script creates zdm grids for ASKAP incoherent sum observations. + +It exists partly to calculate relative rates from surveys + +For CHIME 1.28, it's 2.54 +For updated, it's 1.88 + +""" +import os + +from astropy.cosmology import Planck18 +from zdm import cosmology as cos +from zdm import figures +from zdm import parameters +from zdm import survey +from zdm import pcosmic +from zdm import iteration as it +from zdm import loading +from zdm import io +from zdm import optical as opt +from zdm import states +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +import numpy as np +from zdm import survey +from matplotlib import pyplot as plt +import importlib.resources as resources + +def main(): + + # in case you wish to switch to another output directory + + opdir="ImprovedSurveys/" + + # approximate best-fit values from recent analysis + # best-fit from Jordan et al + state = states.load_state("HoffmannHalo25")#,scat="updated",rep=None) + + if not os.path.exists(opdir): + os.mkdir(opdir) + + # Initialise surveys and grids + sdir = resources.files('zdm').joinpath('../papers/CRACO/ImprovedSurveys') + """ + ORIGINAL INVESTIGATION + names=['CRAFT_CRACO_900','CRAFT_CRACO_900_imp1', + 'CRAFT_CRACO_900_imp2.1_div2','CRAFT_CRACO_900_imp2.1_div4','CRAFT_CRACO_900_imp2.1_div8', + 'CRAFT_CRACO_900_imp2.2_div2','CRAFT_CRACO_900_imp2.2_div4','CRAFT_CRACO_900_imp2.2_div8', + 
'CRAFT_CRACO_900_imp2.3_div2','CRAFT_CRACO_900_imp2.3_div4','CRAFT_CRACO_900_imp2.3_div8', + 'CRAFT_CRACO_900_imp3' + ] + labels = ["CRACO 900 MHz","1: $T_{\\rm sys}=25^{\\circ}$ K", + "2.1 $t_{\\rm rs}=6.8$\\,ms","2.1 $t_{\\rm rs}=3.4$\\,ms","2.1 $t_{\\rm rs}=1.7$\\,ms", + "2.2 $t_{\\rm rs}=6.8$\\,ms","2.2 $t_{\\rm rs}=3.4$\\,ms","2.2 $t_{\\rm rs}=1.7$\\,ms", + "2.3 $t_{\\rm rs}=6.8$\\,ms","2.3 $t_{\\rm rs}=3.4$\\,ms","2.3 $t_{\\rm rs}=1.7$\\,ms", + "3: $\\nu_{\\rm res}=167\\,kHz" + ] + linestyles=["-","-","-.","-.","-.","--","--","--",":",":",":","-"] + """ + + """ + names=['CRAFT_CRACO_900', + 'CRAFT_CRACO_900_imp2.3_div2','CRAFT_CRACO_900_imp2.3_div4','CRAFT_CRACO_900_imp2.3_div8', + 'CRAFT_CRACO_900_imp2.4_div2','CRAFT_CRACO_900_imp2.4_div4','CRAFT_CRACO_900_imp2.4_div8', + 'CRAFT_CRACO_900_imp2.4_div8_primary', + #'CRAFT_CRACO_900_imp1', + 'CRAFT_CRACO_900_imp_all' + ] + labels = ["CRACO 900 MHz", + "1: $t_{\\rm rs}=6.8$\\,ms","2: $t_{\\rm rs}=3.4$\\,ms","2: $t_{\\rm rs}=1.7$\\,ms", + "2: $t_{\\rm rs}=6.8$\\,ms","3: $t_{\\rm rs}=3.4$\\,ms","3: $t_{\\rm rs}=1.7$\\,ms", + "3: Perfect imaging", + #"4: $T_{\\rm sys}=25^{\\circ}$ K", + "4: $T_{\\rm sys}=25^{\\circ}$ K"] + linestyles=["-",":",":",":","--","--","--","-.","-"] + """ + + + nz=400 + zmax=4 + ndm=500 + dmmax=5000 + + # purely to get normalisation - relative to CRAFT ICS + names = ['CRAFT_ICS_892'] + #names = ['CRAFT_class_I_and_II'] + ss,gs = loading.surveys_and_grids(survey_names=names,repeaters=False, + init_state=state, + zmax=zmax,nz=nz,dmmax=dmmax,ndm=ndm) + + + rate = np.sum(gs[0].get_rates()) + # normalises rate to actual observed rate + actual_rate = ss[0].NORM_FRB/ss[0].TOBS + multiplier = actual_rate/rate + print("Calculated rate multiplier of ",multiplier) + + names=['CRAFT_CRACO_900', + 'CRAFT_CRACO_900_imp1', + 'CRAFT_CRACO_900_imp2.4_div8_primary', + 'CRAFT_CRACO_900_imp_all' + ] + labels = ["CRACO 900 MHz", + "1: $T_{\\rm sys}=20^{\\circ}$ K PAFs", + "2: Perfect CRACO", + "3: Perfect 
CRACO + $T_{\\rm sys}=25^{\\circ}$ K PAFs"] + linestyles=["-",":","--","-.","-",":","--"] + + + ss,gs = loading.surveys_and_grids(survey_names=names,repeaters=False, + init_state=state,sdir=sdir, + zmax=zmax,nz=nz,dmmax=dmmax,ndm=ndm) + + + + ######### plots total DM and z distribution ####### + # set limits for plots - will be LARGE! + DMmax=4000 + zmax=4. + + plt.figure() + ax1 = plt.gca() + plt.xlabel("redshift $z$") + plt.ylabel("p(z) [a.u.]") + plt.xlim(0.01,3) + plt.ylim(0,1) + #plt.ylim(0,80) + + plt.figure() + ax2 = plt.gca() + plt.xlabel("DM pc cm$^{-3}$") + plt.ylabel("p(DM) [a.u.]") + plt.xlim(0,3000) + plt.ylim(0,1) + #plt.ylim(0,0.0009) + + zvals = gs[0].zvals + dz = zvals[1]-zvals[0] + dmvals = gs[0].dmvals + ddm = dmvals[1]-dmvals[0] + + pzs=[] + pdms=[] + allrates=[] + # chooses the first arbitrarily to extract zvals etc from + for i,g in enumerate(gs): + + s=ss[i] + g=gs[i] + name = names[i] + #figures.plot_grid(gs[i].rates,g.zvals,g.dmvals, + # name=opdir+name+"_zDM.pdf",norm=3,log=True, + # label='$\\log_{10} p({\\rm DM}_{\\rm IGM} + {\\rm DM}_{\\rm host},z)$ [a.u.]', + # project=False,ylabel='${\\rm DM}_{\\rm IGM} + {\\rm DM}_{\\rm host}$', + # zmax=zmax,DMmax=DMmax,Aconts=[0.01,0.1,0.5]) + + rates = gs[i].get_rates() #gs[i].rates * 10**g.state.FRBdemo.lC + rate = np.sum(rates)*multiplier + allrates.append(rate) + pz = np.sum(rates,axis=1) + pz /= dz + + pdm = np.sum(rates,axis=0) + pdm /= ddm + + pzs.append(pz) + pdms.append(pdm) + + inorm=3 + for i,g in enumerate(gs): + pz = pzs[i]/np.max(pzs[inorm]) + pdm = pdms[i]/np.max(pdms[inorm]) + + print("Rate for ",names[i]," is ",allrates[i], "(relative rate: ",allrates[i]/allrates[0],") per day") + + plt.sca(ax1) + plt.plot(zvals,pz,label=labels[i],linestyle=linestyles[i]) + + plt.sca(ax2) + plt.plot(dmvals,pdm,label=labels[i],linestyle=linestyles[i]) + + + + plt.sca(ax1) + plt.savefig("ATNF_2030/nolegend_improved_zs.png") + plt.legend(fontsize=12,loc="upper right") + plt.tight_layout() + 
plt.savefig("ATNF_2030/improved_zs.png") + plt.close() + + plt.sca(ax2) + plt.tight_layout() + plt.savefig("ATNF_2030/nolegend_improved_dms.png") + plt.legend(fontsize=12,loc="upper right") + plt.tight_layout() + plt.savefig("ATNF_2030/improved_dms.png") + plt.close() + +def plot_efficiencies(gs,ss): + """ + Does some efficiency plots + """ + ###### plots efficiencies ###### + plt.figure() + for i,s in enumerate(ss): + + for j in np.arange(s.NWbins): + if j==0: + plt.plot(s.dmvals,s.efficiencies[j,:],linestyle=linestyles[i],label=labels[i]) + else: + plt.plot(s.dmvals,s.efficiencies[j,:],linestyle=linestyles[i],color=plt.gca().lines[-1].get_color()) + plt.xlabel("DM") + plt.ylabel("Efficiency") + plt.legend() + plt.tight_layout() + plt.savefig("Plots/efficiency.png") + plt.close() + + + ##### Plots an example of the threshold ###### + plt.figure() + for i,g in enumerate(gs): + print("Survey weights are ",ss[i].wlist,ss[i].wplist) + for j in np.arange(g.nthresh): + if j==0: + plt.plot(g.dmvals,g.thresholds[j,10,:],linestyle=linestyles[i],label=labels[i],linewidth=0.2) + else: + plt.plot(g.dmvals,g.thresholds[j,10,:],linestyle=linestyles[i],color=plt.gca().lines[-1].get_color(),linewidth=j) + plt.xlabel("DM") + plt.ylabel("Threshold (erg)") + plt.legend() + plt.tight_layout() + plt.savefig("Plots/g_thresholds.png") + plt.close() + + +def check_FE(state): + """ + Checks FRB rate compared to Fly's Eye rate, which is the most reliable and consistent + """ + ###### Checks normalisation ###### + ss,gs = loading.surveys_and_grids( + survey_names=["CRAFT_class_I_and_II"],repeaters=False,init_state=state) # should be equal to actual number of FRBs, but for this purpose it doesn't matter + + rate = np.sum(gs[0].rates) * 10**gs[0].state.FRBdemo.lC * ss[0].TOBS + + print("Expected number for Fly's Eys is ",rate," per day") + print("c.f. 
actual number: ",ss[0].NORM_FRB) + + + +main() diff --git a/papers/Casatta/CASATTA MFAA SKA2 FRB estimates.csv b/papers/Casatta/CASATTA MFAA SKA2 FRB estimates.csv new file mode 100644 index 00000000..7fc0762b --- /dev/null +++ b/papers/Casatta/CASATTA MFAA SKA2 FRB estimates.csv @@ -0,0 +1,37 @@ +Array_name,SEFDJy,Tsys (K),Aeff (m^2),A/Tsys (m^2/K),FrequencyMHz,BandwidthMHz,FWHM_deg,Time_resolution_ms,Freq_res_MHz +AA-600MHz-1M-narrow,0.625,30,240000,8000,600,600,40,1,0.5 +AA-600MHz-256k-narrow,2.5,30,60000,2000,600,600,40,1,0.5 +AA-600MHz-64k-narrow,10,30,15000,500,600,600,40,1,0.5 +AA-600MHz-16k-narrow,40,30,3750,125,600,600,40,1,0.5 +AA-600MHz-4k-narrow,160,30,937.5,31.25,600,600,40,1,0.5 +AA-600MHz-1k-narrow,640,30,234.375,7.8125,600,600,40,1,0.5 +AA-600MHz-256-narrow,2560,30,58.59375,1.953125,600,600,40,1,0.5 +AA-600MHz-64-narrow,10240,30,14.6484375,0.48828125,600,600,40,1,0.5 +AA-600MHz-16-narrow,40960,30,3.662109375,0.1220703125,600,600,40,1,0.5 +AA-600MHz-1M-wide,0.625,30,240000,8000,600,600,120,1,0.5 +AA-600MHz-256k-wide,2.5,30,60000,2000,600,600,120,1,0.5 +AA-600MHz-64k-wide,10,30,15000,500,600,600,120,1,0.5 +AA-600MHz-16k-wide,40,30,3750,125,600,600,120,1,0.5 +AA-600MHz-4k-wide,160,30,937.5,31.25,600,600,120,1,0.5 +AA-600MHz-1k-wide,640,30,234.375,7.8125,600,600,120,1,0.5 +AA-600MHz-256-wide,2560,30,58.59375,1.953125,600,600,120,1,0.5 +AA-600MHz-64-wide,10240,30,14.6484375,0.48828125,600,600,120,1,0.5 +AA-600MHz-16-wide,40960,30,3.662109375,0.1220703125,600,600,120,1,0.5 +AA-1400MHz-1M-narrow,3.125,25,29600,1184,1400,1400,40,1,1 +AA-1400MHz-256k-narrow,12.5,25,7400,296,1400,1400,40,1,1 +AA-1400MHz-64k-narrow,50,25,1850,74,1400,1400,40,1,1 +AA-1400MHz-16k-narrow,200,25,462.5,18.5,1400,1400,40,1,1 +AA-1400MHz-4k-narrow,800,25,115.625,4.625,1400,1400,40,1,1 +AA-1400MHz-1k-narrow,3200,25,28.90625,1.15625,1400,1400,40,1,1 +AA-1400MHz-256-narrow,12800,25,7.2265625,0.2890625,1400,1400,40,1,1 
+AA-1400MHz-64-narrow,51200,25,1.806640625,0.072265625,1400,1400,40,1,1 +AA-1400MHz-16-narrow,204800,25,0.4516601563,0.01806640625,1400,1400,40,1,1 +AA-1400MHz-1M-wide,3.125,25,29600,1184,1400,1400,120,1,1 +AA-1400MHz-256k-wide,12.5,25,7400,296,1400,1400,120,1,1 +AA-1400MHz-64k-wide,50,25,1850,74,1400,1400,120,1,1 +AA-1400MHz-16k-wide,200,25,462.5,18.5,1400,1400,120,1,1 +AA-1400MHz-4k-wide,800,25,115.625,4.625,1400,1400,120,1,1 +AA-1400MHz-1k-wide,3200,25,28.90625,1.15625,1400,1400,120,1,1 +AA-1400MHz-256-wide,12800,25,7.2265625,0.2890625,1400,1400,120,1,1 +AA-1400MHz-64-wide,51200,25,1.806640625,0.072265625,1400,1400,120,1,1 +AA-1400MHz-16-wide,204800,25,0.4516601563,0.01806640625,1400,1400,120,1,1 diff --git a/papers/Casatta/DSA_1600.ecsv b/papers/Casatta/DSA_1600.ecsv new file mode 100644 index 00000000..56087552 --- /dev/null +++ b/papers/Casatta/DSA_1600.ecsv @@ -0,0 +1,16 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: DMG, unit: pc / cm3, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: Z, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"MAX_LOC_DMEG": -1}, +# "telescope": {"BMETHOD": 0, "BTHRESH": 0.5, "DIAM": 6.0, +# "NBEAMS": 1,"NBINS": 5, "FBAR": 1350.0, "FRES": 0.244141, +# "TRES": 0.262144, "SNRTHRESH": 8.5, "BW": 1300.0, "THRESH": 0.013}}'} +# schema: astropy-2.0 +TNS DM DMG SNR Z +DUMMY 491.554 37.3 133.33 -1. 
diff --git a/papers/Casatta/README.txt b/papers/Casatta/README.txt new file mode 100644 index 00000000..943617be --- /dev/null +++ b/papers/Casatta/README.txt @@ -0,0 +1,3 @@ +These calculations are temporary calcs for the ATNF ASKAP science day + +Data provided by Nithya and Josh diff --git a/papers/Casatta/casatta_base.ecsv b/papers/Casatta/casatta_base.ecsv new file mode 100644 index 00000000..729ceb01 --- /dev/null +++ b/papers/Casatta/casatta_base.ecsv @@ -0,0 +1,14 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: SNRTHRESH, datatype: float64} +# meta: !!omap +# - {survey_data: "{\n \"observing\": {\n \"NORM_FRB\": 1,\n \"TOBS\": 1\n },\n \"telescope\": {\n \ +# \ \"DIAM\": 40,\n \"BMETHOD\": 0,\n \"NBEAMS\": 1,\n \"NBINS\": 10\n }\n}"} +# schema: astropy-2.0 +TNS DM DMG SNR SNRTHRESH +DUMMY 200 30 10. 10. diff --git a/papers/Casatta/compare_dsa_casatta.py b/papers/Casatta/compare_dsa_casatta.py new file mode 100644 index 00000000..eccab508 --- /dev/null +++ b/papers/Casatta/compare_dsa_casatta.py @@ -0,0 +1,178 @@ +""" +Runs a simulation of DSA 1600, compartes that to CASATTA N... 
+""" +from scipy.interpolate import interp1d +import pandas as pd +import numpy as np +import importlib.resources as resources +import copy +import scipy.constants as constants +from matplotlib import pyplot as plt + +from zdm import states +from zdm import misc_functions as mf +from zdm import grid as zdm_grid +from zdm import survey +from zdm import pcosmic + + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + +def main(): + """ + + """ + # does exactly the same as "sim_casatta.py", just picks out the + # specific cases we want + + # loads + #df = read_casatta_params() + #nsims,ncols = df.shape + + + + ##### SIMULATES DSA 1600 as per CASATTA ######3 + + # state. Does not use updated scattering, because it takes a long time! + state = states.load_state("HoffmannHalo25")#scat="updated",rep=None) + + zDMgrid, zvals, dmvals = mf.get_zdm_grid( + state, new=True, plot=False, method='analytic', + datdir=resources.files('zdm').joinpath('GridData')) + + # we can keep this constant - it smears DM due to host DM + mask = pcosmic.get_dm_mask(dmvals, (state.host.lmean, state.host.lsigma), zvals, plot=False) + + renorm = get_constant(state,zDMgrid, zvals, dmvals, mask) + + sdir="." 
+ survey_name = "DSA_1600" + + s = survey.load_survey(survey_name, state, dmvals, zvals=zvals, sdir=sdir) + + g = zdm_grid.Grid(s, copy.deepcopy(state), zDMgrid, zvals, dmvals, mask, wdist=True) + + + daily = np.sum(g.rates)* 10**state.FRBdemo.lC *renorm + + pz = np.sum(g.rates,axis=1)* 10**state.FRBdemo.lC *renorm + pdm = np.sum(g.rates,axis=0)* 10**state.FRBdemo.lC *renorm + dz1 = zvals[1]-zvals[0] + print("Daily DSA rate is ",daily) + ###### reads CASATTA ##### + + df = pd.read_csv("CASATTA MFAA SKA2 FRB estimates.csv") + + dailys = np.load("dailys.npy") + pzs = np.load("pzs.npy")*renorm + pdms = np.load("pdms.npy")*renorm + zvals2 = np.load("zvals.npy") + dmvals = np.load("dmvals.npy") + threshs = np.load("threshs.npy") + + + + # selects which casatta plot to use + OK1=np.where(df["FrequencyMHz"]==600) + OK2=np.where(df["FWHM_deg"]==120) + OK=np.intersect1d(OK1,OK2) + + dz2 = zvals2[1]-zvals2[0] + + # selects which casatta we want + ic = 13 + print(df["Array_name"][ic]," has daily rate ",dailys[ic]) + # plots! 
+ + optdir = str(resources.files('zdm').joinpath('data/optical'))+"/" + + yf = np.load(optdir+"fz_23.0.npy") + xf = np.load(optdir+"zvals.npy") + itp = interp1d(xf,yf) + hfracs = itp(zvals) + + yf2 = np.load(optdir+"fz_24.7.npy") + xf2 = np.load(optdir+"zvals.npy") + itp2 = interp1d(xf2,yf2) + hfracs2 = itp2(zvals) + + + yf3 = np.load(optdir+"fz_27.5.npy") + xf3 = np.load(optdir+"zvals.npy") + itp3 = interp1d(xf3,yf3) + hfracs3 = itp3(zvals) + + plt.figure() + plt.xlim(0,5) + plt.ylim(0,550) + # multiplies by z-bin width + dz = zvals[1]-zvals[0] + plt.xlabel("z") + plt.ylabel("p(z) [FRBs / day / z]") + #plt.ylim(1e-1,1e7) + plt.plot(zvals,pz/dz1,label="DSA 1600: FRBs",color="red") + plt.fill_between(zvals,pz/dz1*hfracs,label=" hosts: DECaLS",color="red",alpha=0.5) + print("DSA hosts: ",np.sum(pz*hfracs)) + plt.plot(zvals,pzs[ic,:]/dz2,label="CASATTA 4000: FRBs",color="blue") + plt.fill_between(zvals,pzs[ic,:]/dz2*hfracs3,label=" hosts: LSST",color="blue",alpha=0.5) + print("CASATTA hosts: ",np.sum(pzs[ic,:]*hfracs3)) + plt.legend() + plt.tight_layout() + plt.savefig("dsa_pz_30_vs_27.5.png") + plt.close() + + + + plt.figure() + plt.xlim(0,5) + plt.ylim(0,550) + # multiplies by z-bin width + dz = zvals[1]-zvals[0] + plt.xlabel("z") + plt.ylabel("p(z) [FRBs / day / z]") + #plt.ylim(1e-1,1e7) + plt.plot(zvals,pz/dz1,label="DSA 1600: FRBs",color="red") + plt.fill_between(zvals,pz/dz1*hfracs2,label="DSA 1600: hosts",color="red",alpha=0.5) + print("DSA hosts: ",np.sum(pz*hfracs2)) + plt.plot(zvals,pzs[ic,:]/dz2,label="CASATTA 4000: FRBs",color="blue") + plt.fill_between(zvals,pzs[ic,:]/dz2*hfracs2,label="CASATTA 4000: hosts",color="blue",alpha=0.5) + print("CASATTA hosts: ",np.sum(pzs[ic,:]*hfracs2)) + plt.legend() + plt.tight_layout() + plt.savefig("dsa_pz_both_24.7.png") + plt.close() + + +def get_constant(state,zDMgrid, zvals, dmvals, mask): + """ + gets a normalising constant for this state + + Args: + df: dataframe containing info for this version of casatta + state: 
zdm state object + zDMgrid: underlying zDM grid giving p(DMcosmic|z) + zvals: redshift values of grid + dmvals: DM values of grid + mask: DM smearing mask for grid based on DMhost + """ + # I am here choosing to renomalise by the CRAFT ICS 892 MHz rates + #norm_survey = "CRAFT_class_I_and_II" + norm_survey = "CRAFT_ICS_892" + s = survey.load_survey(norm_survey, state, dmvals, zvals=zvals) + g = zdm_grid.Grid(s, copy.deepcopy(state), zDMgrid, zvals, dmvals, mask, wdist=True) + + predicted = np.sum(g.rates) * s.TOBS * 10**state.FRBdemo.lC + observed = s.NORM_FRB + + renorm = observed/predicted + print("Calculated renomalisation constant as ",renorm) + return renorm + +main() diff --git a/papers/Casatta/gen_atnf_plot.py b/papers/Casatta/gen_atnf_plot.py new file mode 100644 index 00000000..c374307e --- /dev/null +++ b/papers/Casatta/gen_atnf_plot.py @@ -0,0 +1,77 @@ + +import numpy as np +from matplotlib import pyplot as plt + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + +def main(): + """ + plots casatta simulation results + """ + + # this factor did NOT multiply pzs and pdms because I'm an idiot and forgot + # daily rates are correct + RENORM = 0.177 + + df = read_casatta_params() + nsims,ncols = df.shape + + + # selects which casatta plot to use + OK1=np.where(df["FrequencyMHz"]==600) + OK2=np.where(df["FWHM_deg"]==120) + OK=np.intersect1d(OK1,OK2) + + + dailys = np.load("dailys.npy")[OK] + pzs = np.load("pzs.npy")[OK,:] + pdms = np.load("pdms.npy")[OK,:] + pzs *= RENORM + pdms *= RENORM + zvals = np.load("zvals.npy") + dmvals = np.load("dmvals.npy") + threshs = np.load("threshs.npy")[OK] + + + # compares estimates from nominal figure of merit + FOM = threshs**-1.5 * df["FWHM_deg"][OK]**2 + + plt.figure() + plt.xlabel("FOM [FWHM$^2$ (Jy ms)$^{-1.5}$]") + plt.ylabel("Daily rate") + plt.xscale("log") + plt.yscale("log") + 
plt.scatter(FOM,dailys,label="CASATTA 600 MHz, FWHM=120 deg") + plt.ylim(1e-2,1e8) + plt.xlim(1,1e8) + + plt.plot([1e-2,1e8],[3e-4,3e6],color="black",label="1-1 line",linestyle="--") + plt.legend(loc="upper left") + plt.tight_layout() + plt.savefig("FOM_ATNF.png") + plt.close() + + +def read_casatta_params(infile="CASATTA MFAA SKA2 FRB estimates.csv"): + """ + Reads in casatta parameters + """ + + import pandas as pd + df = pd.read_csv(infile) + + return df + + +main() + + diff --git a/papers/Casatta/plot_casatta.py b/papers/Casatta/plot_casatta.py new file mode 100644 index 00000000..45d53aed --- /dev/null +++ b/papers/Casatta/plot_casatta.py @@ -0,0 +1,111 @@ + +import numpy as np +from matplotlib import pyplot as plt + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + +def main(): + """ + plots casatta simulation results + """ + + # this factor did NOT multiply pzs and pdms because I'm an idiot and forgot + # daily rates are correct + + df = read_casatta_params() + nsims,ncols = df.shape + + dailys = np.load("dailys.npy") + pzs = np.load("pzs.npy") + pdms = np.load("pdms.npy") + zvals = np.load("zvals.npy") + dmvals = np.load("dmvals.npy") + threshs = np.load("threshs.npy") + + # daily FRB rate for each config + plt.figure() + + plt.yscale('log') + plt.scatter(np.arange(nsims),dailys) + plt.xticks(np.arange(nsims),df["Array_name"],rotation=90,fontsize=6) + + plt.ylabel("Daily FRB rate") + plt.tight_layout() + plt.savefig("frb_rate_per_configuration.png") + plt.close() + + plt.figure() + plt.yscale("log") + + + # multiplies by z-bin width + dz = zvals[1]-zvals[0] + plt.xlabel("z") + plt.ylabel("p(z) [FRBs / day / z]") + plt.ylim(1e-1,1e7) + for isim in np.arange(nsims): + plt.plot(zvals,pzs[isim,:]/dz,label=df["Array_name"][isim]) + plt.legend(fontsize=4) + plt.tight_layout() + plt.savefig("all_pz.png") + plt.close() + + + plt.figure() + plt.yscale("log") 
+ plt.ylim(1e-4,1e4) + # multiplies by DM width + ddm = dmvals[1]-dmvals[0] + + plt.xlabel("DM [pc cm$^{-3}$]") + plt.ylabel("p(DM) [FRBs /day /pc cm$^{-3}$]") + for isim in np.arange(nsims): + plt.plot(dmvals,pdms[isim,:]/ddm,label=df["Array_name"][isim]) + print("Daily rate for sim ",isim,": ",df["Array_name"][isim], " is ",dailys[isim]) + plt.legend(fontsize=4) + plt.tight_layout() + plt.savefig("all_pdm.png") + plt.close() + + + # compares estimates from nominal figure of merit + FOM = threshs**-1.5 * df["FWHM_deg"]**2 + + plt.figure() + plt.xlabel("FOM [FWHM$^2$ (Jy ms)$^{-1.5}$]") + plt.ylabel("Daily rate") + plt.xscale("log") + plt.yscale("log") + plt.scatter(FOM,dailys) + plt.ylim(1e-4,1e8) + + plt.plot([1e-2,1e8],[1e-3,1e7],color="black",label="1-1 line",linestyle="--") + plt.legend() + plt.tight_layout() + plt.savefig("FOM.png") + plt.close() + + +def read_casatta_params(infile="CASATTA MFAA SKA2 FRB estimates.csv"): + """ + Reads in casatta parameters + """ + + import pandas as pd + df = pd.read_csv(infile) + + return df + + +main() + + diff --git a/papers/Casatta/sim_casatta.py b/papers/Casatta/sim_casatta.py new file mode 100644 index 00000000..81cc2d8d --- /dev/null +++ b/papers/Casatta/sim_casatta.py @@ -0,0 +1,163 @@ + +import numpy as np +import importlib.resources as resources +import copy +import scipy.constants as constants +from matplotlib import pyplot as plt + +from zdm import states +from zdm import misc_functions as mf +from zdm import grid as zdm_grid +from zdm import survey +from zdm import pcosmic + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + +def main(): + """ + main file to simulate casatta sensitivity + """ + df = read_casatta_params() + nsims,ncols = df.shape + + # state. Does not use updated scattering, because it takes a long time! 
+ state = states.load_state("HoffmannHalo25")#scat="updated",rep=None) + + zDMgrid, zvals, dmvals = mf.get_zdm_grid( + state, new=True, plot=False, method='analytic', + datdir=resources.files('zdm').joinpath('GridData')) + + # we can keep this constant - it smears DM due to host DM + mask = pcosmic.get_dm_mask(dmvals, (state.host.lmean, state.host.lsigma), zvals, plot=False) + + # gets constant of total FRB rate to normalise to + renorm = get_constant(state,zDMgrid,zvals,dmvals,mask) + + threshs = np.zeros([nsims]) + dailys = np.zeros([nsims]) + pzs = np.zeros([nsims,zvals.size]) + pdms = np.zeros([nsims,dmvals.size]) + + for isim in np.arange(nsims): + daily,pz,pdm,thresh = sim_casatta(df.iloc[isim],state,zDMgrid,zvals,dmvals,mask) + dailys[isim]=daily*renorm + pzs[isim,:]=pz*renorm + pdms[isim,:]=pdm*renorm + threshs[isim] = thresh + print("Done simulation ",isim, " of ", nsims,", daily rate ",daily*renorm) + + # modifies rates according to expectations + + np.save("threshs.npy",threshs) + np.save("dailys.npy",dailys) + np.save("pzs.npy",pzs) + np.save("pdms.npy",pdms) + np.save("zvals.npy",zvals) + np.save("dmvals.npy",dmvals) + + +def read_casatta_params(infile="CASATTA MFAA SKA2 FRB estimates.csv"): + """ + Reads in casatta parameters + """ + + import pandas as pd + df = pd.read_csv(infile) + + return df + +def sim_casatta(df,state,zDMgrid, zvals, dmvals, mask): + """ + simulates casatta for specific values given in dataframe + + Args: + df: dataframe containing info for this version of casatta + state: zdm state object + zDMgrid: underlying zDM grid giving p(DMcosmic|z) + zvals: redshift values of grid + dmvals: DM values of grid + mask: DM smearing mask for grid based on DMhost + """ + + # base name = casatta.ecsv + # directory where the base survey lives + sdir = resources.files('zdm').joinpath('../papers/Casatta/') + sdir = str(sdir) # convert to string, not annoying object + + ########### calculates relevant properties for CASATTA ####### + BW = 
df["BandwidthMHz"] + BWHz = BW*1e6 + tms = 1e-3 # seconds in one ms + + # SEFD in Jy. + NSAMP = 4.*BWHz *tms # 2 for polarisation, 2 for Nyquist, 1e3 for Jyms + nsigma=10 # S/N requirement for detection + THRESH = nsigma*df['SEFDJy']/NSAMP**0.5 + + FBAR = df["FrequencyMHz"] + tres = df["Time_resolution_ms"] + fres = df["Freq_res_MHz"] + + fMHz = df['FrequencyMHz'] + #print(df['Array_name'],df['SEFDJy'],NSAMP,THRESH) + + + # calculates what D to use + FWHM_rad = df["FWHM_deg"]*np.pi/180. + DIAM=1.22*(constants.c/(fMHz*1e6))/FWHM_rad + + # generates a survey dict to modify properties of this survey + # TOBS is one day + survey_dict = {"THRESH": THRESH, "TOBS": 1, "FBAR": float(fMHz), "BW": float(BW), "DIAM": DIAM, + "FRES": float(fres), "TRES": float(tres)} + + survey_name = "casatta_base" + s = survey.load_survey(survey_name, state, dmvals, zvals=zvals, survey_dict=survey_dict, sdir=sdir) + + g = zdm_grid.Grid(s, copy.deepcopy(state), zDMgrid, zvals, dmvals, mask, wdist=True) + + + daily = np.sum(g.rates)* 10**state.FRBdemo.lC + + pz = np.sum(g.rates,axis=1)* 10**state.FRBdemo.lC + pdm = np.sum(g.rates,axis=0)* 10**state.FRBdemo.lC + + return daily,pz,pdm,THRESH + +def get_constant(state,zDMgrid, zvals, dmvals, mask): + """ + gets a normalising constant for this state + + Args: + df: dataframe containing info for this version of casatta + state: zdm state object + zDMgrid: underlying zDM grid giving p(DMcosmic|z) + zvals: redshift values of grid + dmvals: DM values of grid + mask: DM smearing mask for grid based on DMhost + """ + # I am here choosing to renomalise by the CRAFT ICS 892 MHz rates + #norm_survey = "CRAFT_class_I_and_II" + norm_survey = "CRAFT_ICS_892" + s = survey.load_survey(norm_survey, state, dmvals, zvals=zvals) + g = zdm_grid.Grid(s, copy.deepcopy(state), zDMgrid, zvals, dmvals, mask, wdist=True) + + predicted = np.sum(g.rates) * s.TOBS * 10**state.FRBdemo.lC + observed = s.NORM_FRB + + renorm = observed/predicted + print("Calculated 
renomalisation constant as ",renorm) + return renorm + +main() + + diff --git a/papers/SKA_science/Deferral/calc_fom.py b/papers/SKA_science/Deferral/calc_fom.py new file mode 100644 index 00000000..7e611df8 --- /dev/null +++ b/papers/SKA_science/Deferral/calc_fom.py @@ -0,0 +1,161 @@ +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt + + +def main(iTEL,fbeams,tag): + + print("\n\n\n\n\nGENERATING RESULTS FOR ",tag,"\n\n") + if iTEL==0: + options,labels = read_options() + + ####### gets original data ####### + all_station_datas = read_keane() + all_station_data = all_station_datas[iTEL] + + all_stations = all_station_data[0] + radii = all_station_data[1] + sens = all_station_data[2] + FOV = all_station_data[3] + + + # gets previous best radius (same for both configs) + prev_bests = ["C224","SKA041","SKA041"] # For AA4 + + sbest = prev_bests[iTEL] + ibest = np.where(all_stations == sbest)[0] + rmax = radii[ibest].values + print("Orig max radius is ",rmax," with ",ibest+1,"stations") + + + plt.figure() + + plt.plot([rmax,rmax],[0,512],color="black",linestyle=":") + plt.text(rmax*1.1,350,"Pre-deferral\noptimum",rotation=90,fontsize=12) + l1,=plt.plot(radii,np.arange(radii.size)+1,label="original AA4",color="black") + + # this step limits the size of the FOV to the HPBW + eff_rad = np.copy(radii.values) + toolow = np.where(eff_rad < rmax*fbeams**0.5)[0] + eff_rad[toolow] = rmax*fbeams**0.5 + + FOM = eff_rad**-2 * (np.arange(radii.size)+1)**1.5 + old_max = FOM[ibest] + ax1 = plt.gca() + ax2 = ax1.twinx() + + + if iTEL > 0: + imax = np.argmax(FOM[1:])+1 + rmax = radii[imax] + newmax = fbeams * FOM[imax] + print("We find a new peak maximum at r = ",rmax," using ",imax+1," antennas", "reduction of ",newmax/old_max) + l2,=ax2.plot(radii,FOM/FOM[ibest]*fbeams,linestyle="--",label="Relative (deferral)") + + + + + #ax2.plot(radii,FOM,color="black",linestyle="--") + + # loop over all options + # plots graph of radius vs antenna number for each 
options + if iTEL==0: + for i,option in enumerate(options): + matches = identify_present(option,all_stations) + nstations = len(matches) + stations = np.arange(nstations)+1 + plot_r = np.array(radii[matches].values) + ax1.plot(plot_r,stations,label=labels[i]) + + FOM = stations**1.5 / plot_r**2 + FOM_max = np.max(FOM[1:]) + Nmax = np.argmax(FOM[1:])+1 + rmax = plot_r[Nmax] + new_max = FOM_max * fbeams + + ax2.plot(plot_r[1:],fbeams*FOM[1:]/old_max,color=ax1.lines[-1].get_color(),linestyle="--") + + Nless = np.where(plot_r <= rmax)[0][-1] + print("Options ",i," Number of stations included is ",Nless+1) + print("New FOM is ",new_max/old_max," of old efficiency at rmax = ",rmax, Nmax+1) + plt.sca(ax2) + plt.ylabel("Fraction of AA4 FOM") + plt.sca(ax1) + plt.ylim(0,512) + plt.xscale('log') + + if iTEL==0: + plt.ylabel("Number of stations") + plt.legend() + else: + plt.ylabel("Number of antennas") + plt.legend(handles=[l1,l2],labels=["Array density","Relative FOM (deferral)"]) + plt.xlabel("Radius [km]") + plt.tight_layout() + plt.savefig(tag+"stations_vs_radius.png") + plt.close() + + +def get_optimum(): + """ + Gets optimum trade-off + """ + +def identify_present(subset,full_list): + """ + identifies which subset is present, i.e., if all antennas are actually there + """ + + ns = len(subset) + matches = np.zeros([ns],dtype='int') + + for i,station in enumerate(subset): + + match = np.where(full_list == station)[0] + if len(match)==0: + print("could not find station ",station) + continue + matches[i] = match + + sort_matches = np.sort(matches) + + return sort_matches + +def read_keane(): + """ + reads Evan's info + """ + + files = ["../inputs/LowAA4_ID_radius_AonT_FoVdeg2","../inputs/Band1AA4_ID_radius_AonT_FoVdeg2","../inputs/Band2AA4_ID_radius_AonT_FoVdeg2"] + #files = ["../inputs/LowAAstar_ID_radius_AonT_FoVdeg2","../inputs/Band1AAstar_ID_radius_AonT_FoVdeg2","../inputs/Band2AAstar_ID_radius_AonT_FoVdeg2"] + + + #data = np.loadtxt(f,dtype='string') + datas=[] + for 
f in files: + data = pd.read_csv(f, sep='\s+', header=None) + datas.append(data) + return datas + +def read_options(): + """ + reads in options + """ + + options = ["option_5.txt","option_5.2.txt","option_7.txt"] + option_labels = ["option 5","option 5.2","option 7"] + stations=[] + for i,option in enumerate(options): + with open(option, 'r') as f: + for line in f: + slist = line.split(',') + slist[-1] = slist[-1][:-1] # to avoid last \n + stations.append(slist) + return stations,option_labels + +fbeams = [50./250,200./1125,200./1125] + +labels=["Low","Mid_band1","mid_band2"] +for iTEL in np.arange(3): + main(iTEL,fbeams[iTEL],labels[iTEL]) + diff --git a/papers/SKA_science/Deferral/option_5.2.txt b/papers/SKA_science/Deferral/option_5.2.txt new file mode 100644 index 00000000..82c14add --- /dev/null +++ b/papers/SKA_science/Deferral/option_5.2.txt @@ -0,0 +1 @@ +C1,C10,C100,C103,C108,C11,C111,C112,C113,C117,C12,C120,C121,C123,C124,C125,C126,C128,C13,C130,C132,C138,C139,C14,C141,C142,C143,C144,C145,C147,C15,C153,C156,C158,C16,C161,C162,C163,C164,C167,C168,C17,C170,C171,C172,C173,C175,C176,C177,C179,C18,C181,C184,C187,C19,C190,C191,C193,C194,C197,C198,C199,C2,C20,C200,C201,C203,C204,C206,C208,C212,C214,C217,C219,C22,C23,C24,C25,C26,C27,C28,C29,C3,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C4,C41,C42,C43,C44,C45,C46,C47,C48,C49,C5,C50,C51,C52,C53,C54,C55,C56,C57,C58,C59,C6,C60,C61,C62,C63,C64,C65,C66,C67,C68,C69,C7,C70,C71,C72,C73,C74,C75,C76,C77,C78,C79,C8,C81,C82,C83,C84,C86,C87,C88,C89,C9,C91,C98,C99,E1-3,E1-4,E10-1,E10-2,E10-3,E10-4,E13-1,E13-2,E13-3,E13-4,E15-1,E15-2,E15-3,E15-4,E16-1,E16-2,E16-3,E16-4,E2-1,E2-2,E2-3,E3-1,E3-3,E3-4,E4-1,E4-2,E4-3,E8-1,E8-2,E8-3,E8-4,E9-1,E9-2,E9-3,E9-4,N1-1,N1-6,N10-1,N10-2,N10-3,N10-4,N13-1,N13-2,N13-3,N13-4,N15-1,N15-2,N15-3,N15-4,N16-1,N16-2,N16-3,N16-4,N2-1,N2-2,N2-3,N3-1,N3-2,N3-3,N4-1,N4-2,N4-3,N8-1,N8-2,N8-3,N8-4,N9-1,N9-2,N9-3,N9-4,S1-1,S1-2,S10-1,S10-2,S10-3,S10-4,S10-5,S10-6,S13-1,S13-2,S13-3,S13-4,S15-1,S15-2,S15-3,S15-4,
S16-1,S16-2,S16-3,S16-4,S2-4,S2-5,S2-6,S3-2,S3-4,S4-2,S4-3,S4-6,S8-1,S8-2,S8-3,S8-4,S8-5,S8-6,S9-1,S9-2,S9-3,S9-4 diff --git a/papers/SKA_science/Deferral/option_5.txt b/papers/SKA_science/Deferral/option_5.txt new file mode 100644 index 00000000..252374bb --- /dev/null +++ b/papers/SKA_science/Deferral/option_5.txt @@ -0,0 +1 @@ +C1,C10,C100,C103,C108,C11,C111,C112,C113,C117,C12,C120,C121,C123,C124,C125,C126,C128,C13,C130,C132,C138,C139,C14,C141,C142,C143,C144,C145,C147,C15,C153,C156,C158,C16,C161,C162,C163,C164,C167,C168,C17,C170,C171,C172,C173,C175,C176,C177,C179,C18,C181,C184,C187,C19,C190,C191,C193,C194,C197,C198,C199,C2,C20,C200,C201,C203,C204,C206,C208,C212,C214,C216,C219,C22,C23,C24,C25,C26,C27,C28,C29,C3,C30,C31,C32,C33,C34,C35,C36,C37,C38,C39,C4,C41,C42,C43,C44,C45,C46,C47,C48,C49,C5,C50,C51,C52,C53,C54,C55,C56,C57,C58,C59,C6,C60,C61,C62,C63,C64,C65,C66,C67,C68,C69,C7,C70,C71,C72,C73,C74,C75,C76,C77,C78,C79,C8,C81,C82,C83,C84,C86,C87,C88,C89,C9,C91,C98,C99,E1-1,E1-2,E10-1,E10-2,E10-3,E10-4,E13-1,E13-2,E13-3,E13-4,E15-1,E15-2,E15-3,E15-4,E16-1,E16-2,E16-3,E16-4,E2-1,E2-2,E2-3,E3-1,E3-2,E3-3,E4-1,E4-2,E4-3,E8-1,E8-2,E8-3,E8-4,E9-1,E9-2,E9-3,E9-4,N1-1,N1-2,N10-1,N10-2,N10-3,N10-4,N13-1,N13-2,N13-3,N13-4,N15-1,N15-2,N15-3,N15-4,N16-1,N16-2,N16-3,N16-4,N2-1,N2-2,N2-3,N3-1,N3-2,N3-3,N4-1,N4-2,N4-3,N8-1,N8-2,N8-3,N8-4,N9-1,N9-2,N9-3,N9-4,S1-1,S1-2,S10-1,S10-2,S10-3,S10-4,S10-5,S10-6,S13-1,S13-2,S13-3,S13-4,S15-1,S15-2,S15-3,S15-4,S16-1,S16-2,S16-3,S16-4,S2-1,S2-2,S2-3,S3-1,S3-2,S4-1,S4-2,S4-3,S8-1,S8-2,S8-3,S8-4,S8-5,S8-6,S9-1,S9-2,S9-3,S9-4 diff --git a/papers/SKA_science/Deferral/option_7.txt b/papers/SKA_science/Deferral/option_7.txt new file mode 100644 index 00000000..fb96e993 --- /dev/null +++ b/papers/SKA_science/Deferral/option_7.txt @@ -0,0 +1 @@ 
+C1,C10,C100,C103,C108,C11,C111,C112,C113,C117,C12,C120,C121,C123,C124,C125,C126,C128,C13,C130,C132,C138,C139,C14,C141,C142,C143,C144,C145,C147,C15,C153,C156,C158,C16,C161,C162,C163,C164,C167,C168,C17,C170,C171,C172,C173,C175,C176,C177,C179,C18,C181,C184,C187,C19,C190,C191,C193,C194,C197,C198,C199,C2,C20,C200,C201,C203,C204,C205,C206,C208,C21,C212,C214,C217,C219,C22,N4-1,N4-3,C23,C24,C25,C26,C28,C29,C3,C31,C33,C35,C36,C37,C38,C39,C4,C41,C42,C43,C45,C46,C47,C48,C49,C5,C50,C52,C53,C54,C55,C56,C57,C58,C59,C6,C60,C61,C62,C63,C65,C66,C67,C68,C69,C7,C70,C71,C72,C73,C74,C75,C76,C77,C79,C8,C81,C82,C83,C84,C86,C87,C88,C9,C91,C98,C99,E1-3,E1-4,S4-6,E10-1,E10-2,E10-3,E10-4,E13-1,E13-2,E13-3,E13-4,E15-1,E15-2,E15-3,E15-4,E16-1,E16-2,E16-3,E16-4,E2-2,E2-3,E2-4,E3-1,E3-3,E3-4,E4-1,E4-2,E4-3,E8-1,E8-2,E8-3,E8-4,E9-1,E9-2,E9-3,E9-4,N1-1,N1-6,N10-1,N10-2,N10-3,N10-4,N13-1,N13-2,N13-3,N13-4,N15-1,N15-2,N15-3,N15-4,N16-1,N16-2,N16-3,N16-4,N2-1,N2-2,N2-3,N3-1,N3-2,N3-3,N4-2,N8-1,N8-2,N8-3,N8-4,N9-1,N9-2,N9-3,N9-4,S1-1,S1-2,S3-2,C222,C223,C94,C136,S10-1,S10-2,S10-3,S10-4,S10-5,S10-6,S13-1,S13-2,S13-3,S13-4,S15-1,S15-2,S15-3,S15-4,S16-1,S16-2,S16-3,S16-4,S2-4,S2-5,S2-6,S3-4,S4-2,S4-3,C64,C89,C78,S8-1,S8-2,S8-3,S8-4,S8-5,S8-6,S9-1,S9-2,S9-3,S9-4 diff --git a/papers/Scattering/CRAFT_ICS_HTR_Catalogue1.csv b/papers/Scattering/CRAFT_ICS_HTR_Catalogue1.csv new file mode 100644 index 00000000..610410ca --- /dev/null +++ b/papers/Scattering/CRAFT_ICS_HTR_Catalogue1.csv @@ -0,0 +1,37 @@ +,TNS,Nubar,Nant,SNdet,SNoff,DM,DMErr,DMstruct,DMstructErr,Wsnr,W95,Z,TauObs,TauObsErr,TauAlpha,TauAlphaErr,NUTau,Tau1GHz,Tau1GHzErr,Components,L/I,L/IErr,V/I,V/IErr,P/I,P/IErr,RM,RMErr,RMMW,RMMWErr,Calibrator,PATrend,m,NU_c,NU_dc,NU_dcErr,Alpha_dc,Alpha_dcErr,nudctau,nudctauErr,LzLgMax,LzLgMaxErr 
+0,20180924B,1320.0,24,21.1,77.0,361.75,0.015,361.74,0.14,0.91,2.0,0.3214,0.59,0.01,-3.68,0.04,1297.5,1.56,0.13,s,0.89,0.02,0.09,0.02,0.9,0.02,17.3,0.8,16.5,5.0,VELA,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +1,20181112A,1297.5,11,19.3,220.0,589.265,0.001,589.265,0.001,0.1,1.2,0.4755,0.023,0.002,-2.0,0.3,1297.5,0.039,0.006,m,0.9,0.0,0.1,0.0,0.9,0.0,10.5,0.4,16.2,5.9,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +2,20190102C,1297.5,23,14.0,167.0,364.55,0.015,364.55,0.02,0.076,1.25,0.29,0.027,0.012,-5.5,1.0,1271.5,0.09,0.06,m,0.96,0.02,0.02,0.02,0.97,0.02,-106.1,0.9,26.6,7.7,VELA,2.0,0.41,1271.5,0.6,0.3,10.0,9999.0,101.0,68.0,9999.0,9999.0 +3,20190608B,1271.5,25,16.1,41.0,340.0,1.0,338.7,0.9,4.95,10.8,0.1178,3.83,0.15,-3.37,1.3,1269.5,8.5,1.2,s,1.0,0.04,0.02,0.02,1.0,0.04,353.0,1.0,-24.4,13.3,VELA,2.0,0.78,1271.5,1.4,0.1,5.8,0.5,33700.0,2700.0,6.6,0.5 +4,20190611B,1271.5,20,9.5,27.0,322.4,0.1,322.7,0.34,0.076,1.592,0.378,0.03,0.015,0.3,2.0,1252.7,0.03,0.02,m,0.74,0.04,0.29,0.04,0.8,0.04,17.0,3.0,29.0,10.8,VELA,2.0,0.96,1271.5,1.5,0.2,-2.0,1.0,282.0,146.0,9999.0,9999.0 +5,20190711A,1271.5,28,23.8,46.0,592.0,2.0,587.74,0.025,8.6,10.99,0.522,0.0076,0.002,-2.5,1.1,1172.9,0.011,0.005,m,0.98,0.03,0.14,0.02,0.99,0.03,4.0,1.0,19.4,6.5,VELA,2.0,0.64,1136.9,0.11,0.01,-10.0,5.0,5.2,1.5,9999.0,9999.0 +6,20190714A,1271.5,28,10.7,52.0,504.7,0.3,504.13,0.4,0.86,2.99,0.2365,0.422,0.008,-2.7,0.6,1286.6,0.83,0.05,s,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,VELA,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +7,20191001A,919.5,30,37.1,108.0,507.0,0.3,507.0,0.65,5.3,13.468,0.23,4.52,0.03,-4.85,0.3,826.4,1.78,0.04,s,0.53,0.01,0.05,0.01,0.54,0.01,51.1,0.4,23.5,4.3,VELA,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 
+8,20191228B,1271.5,21,22.9,74.0,297.0,1.0,296.0,2.0,7.8,13.596,0.2432,5.85,0.2,-3.6,0.6,1273.0,14.0,1.0,s,0.93,0.02,0.1,0.02,0.94,0.02,11.9,0.9,18.2,6.1,VELA,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +9,20200430A,863.5,26,13.9,67.0,379.9,0.55,379.6,0.55,11.0,22.68,0.161,6.5,0.15,-1.45,0.2,863.5,5.25,0.25,s,0.43,0.02,0.04,0.02,0.43,0.02,195.3,0.7,14.5,7.0,2045-1616,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +10,20200906A,864.5,7,16.1,347.0,577.8,0.2,577.81,0.01,0.057,0.128,0.3688,0.0315,0.0007,-4.5,0.4,846.4,0.0148,0.0004,s,0.8,0.005,0.073,0.004,0.804,0.005,75.47,0.08,30.3,19.8,VELA,2.0,0.92,1271.5,1.99,0.01,3.0,1.0,394.0,9.0,11600.0,300.0 +11,20210117A,1271.5,21,17.7,112.0,729.2,0.15,729.1,0.35,1.24,3.584,0.214,0.25,0.2,5.0,8.0,1274.5,0.15,0.1,m,0.92,0.02,0.05,0.01,0.92,0.02,-45.8,0.7,3.3,9.2,VELA,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +12,20210320C,863.5,24,15.3,161.0,384.6,0.02,384.59,0.02,0.381,0.884,0.28,0.193,0.007,-4.4,0.1,828.4,0.084,0.004,m,0.86,0.008,0.117,0.006,0.868,0.008,288.8,0.2,-2.8,5.7,1644,1.0,0.83,824.2,0.91,0.03,2.0,1.0,480.0,28.0,1150.0,70.0 +13,20210407E,1271.5,15,19.1,131.0,1784.8,0.2,1784.9,0.35,0.743,1.62,9999.0,0.09,0.02,-1.2,1.6,1219.8,0.08,0.03,m,0.97,0.01,0.09,0.01,0.98,0.01,-9.1,0.6,-59.6,28.6,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +14,20210912A,1271.5,22,31.2,479.0,1233.72,0.015,1233.7,0.025,0.095,1.612,9999.0,0.048,0.008,-2.5,0.9,1275.6,0.09,0.03,m,0.625,0.005,0.37,0.005,0.726,0.004,6.0,0.5,8.4,3.8,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +15,20211127I,1271.5,24,37.9,340.0,234.83,0.08,234.86,7.02,0.229,0.483,0.046946,0.025,0.02,0.0,5.5,1272.5,0.02,0.02,m,0.244,0.003,0.129,0.003,0.276,0.003,-67.0,1.0,-2.9,6.2,N/A,2.0,0.74,1271.5,2.88,0.09,3.3,0.2,450.0,360.0,9999.0,9999.0 
+16,20211203C,920.5,24,14.2,47.0,636.2,0.4,635.16,0.935,12.4,25.449,0.3439,1.66,0.16,-9.7,2.4,891.4,0.55,0.1,s,0.57,0.02,0.07,0.03,0.58,0.02,34.3,1.2,-29.2,9.1,1644,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +17,20211212A,1631.5,24,10.5,45.0,200.0,1.5,200.0,3.5,2.1,5.628,0.0707,1.8,0.1,-2.8,2.3,1490.8,8.0,6.0,s,0.47,0.02,0.09,0.02,0.48,0.02,21.0,7.0,6.0,5.7,1644,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +18,20220105A,1631.5,22,9.8,42.0,583.0,2.0,581.5,3.35,0.95,2.25,0.2785,0.43,0.01,-2.0,0.8,1649.8,1.2,0.5,m,0.3,0.03,0.05,0.03,0.3,0.03,-1312.0,8.0,3.9,1.5,1644,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +19,20220501C,863.5,23,14.8,79.0,449.6,0.3,449.1,0.25,6.1,6.9,0.381,0.35,0.25,4.0,8.0,864.5,0.43,0.3,m,0.68,0.02,0.06,0.02,0.69,0.02,35.5,0.3,9.6,4.5,VELA,2.0,0.44,863.5,9.2,0.5,-1.0,2.0,20200.0,14500.0,9999.0,9999.0 +20,20220610A,1271.5,22,23.9,62.0,1458.1,0.4,1457.6,0.85,1.07,2.0,1.015,0.521,0.001,-3.56,0.03,1149.4,0.855,0.008,s,0.98,0.01,0.065,0.007,0.99,0.01,217.0,2.0,11.9,4.9,VELA,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +21,20220725A,919.5,25,10.9,49.0,290.1,0.35,290.0,0.25,3.43,8.016,0.1926,2.29,0.05,-1.94,0.06,1149.4,1.95,0.05,a,0.58,0.02,0.13,0.03,0.6,0.02,-26.3,0.7,-190.7,49.8,VELA,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +22,20220918A,1271.5,24,26.3,21.0,643.0,5.5,660.0,25.0,11.43,13.851,0.491,7.66,0.1,-2.1,0.03,1133.5,10.0,1.1,s,0.15,0.01,0.13,0.02,0.19,0.02,559.0,23.0,14.6,9.8,VELA,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +23,20221106A,1631.5,21,19.7,133.0,343.2,0.8,343.0,0.3,5.33,6.895,0.2044,0.182,0.006,-0.7,1.3,1649.6,0.25,0.09,s,0.862,0.008,0.078,0.006,0.865,0.008,444.0,1.0,34.7,11.4,VELA,2.0,0.84,1631.5,2.0,1.0,1.4,0.5,2290.0,1150.0,9999.0,9999.0 
+24,20230526A,1271.5,22,22.1,88.0,316.2,0.2,316.1,0.2,2.0,2.7,0.157,1.16,0.01,-3.6,0.3,1272.2,2.75,0.1,a,0.391,0.008,0.04,0.008,0.393,0.008,613.0,2.0,9.7,6.1,VELA,2.0,0.81,1271.5,2.6,0.1,-6.0,4.0,18950.0,750.0,63.0,2.0 +25,20230708A,919.5,23,30.5,270.0,411.54,0.045,411.52,0.065,1.14,23.578,0.105,0.24,0.02,-2.84,0.4,920.5,0.21,0.005,m,0.95,0.01,0.39,0.01,1.03,0.01,-7.5,0.4,43.6,10.5,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +26,20230718A,1271.5,22,10.9,104.0,476.67,0.09,476.64,0.15,0.55,0.695,0.035,0.117,0.005,-1.6,0.7,1272.2,0.17,0.02,m,0.92,0.02,0.11,0.01,0.92,0.02,243.1,0.6,186.4,50.4,1644,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +27,20230731A,1271.5,25,16.6,61.0,701.1,0.3,700.73,0.45,0.65,2.655,9999.0,0.45,0.05,-2.3,0.6,1271.8,0.78,0.04,a,0.42,0.02,0.23,0.02,0.48,0.02,268.0,5.0,213.6,67.3,?????,1.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +28,20230902A,832.5,22,11.8,113.0,440.1,0.1,440.166,0.02,0.229,0.678,0.3619,0.123,0.002,-2.55,0.08,812.4,0.072,0.002,m,0.91,0.01,0.05,0.01,0.91,0.01,164.8,0.2,10.1,6.3,VELA,1.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +29,20231226A,863.5,22,36.7,96.0,329.9,0.1,328.73,1.37,5.3,9.72,9999.0,0.1,0.07,-1.0,3.0,762.8,0.25,0.2,m,0.86,0.02,0.04,0.01,0.86,0.02,428.4,0.3,13.0,6.8,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +30,20240201A,920.5,24,13.9,63.0,374.5,0.2,373.514,0.35,3.05,3.901,0.042729,0.78,0.04,-3.9,0.5,915.5,0.46,0.06,m,0.76,0.02,0.09,0.02,0.76,0.02,1275.0,0.4,5.9,6.5,1644,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +31,20240208A,863.5,14,12.1,21.0,260.2,0.3,259.83,0.12,1.7,10.0,9999.0,1.35,0.25,-2.7,2.1,864.1,1.0,0.45,s,0.94,0.09,0.08,0.08,0.94,0.09,-73.7,1.4,3.9,6.3,1644,0.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 
+32,20240210A,863.5,23,11.6,59.0,283.73,0.05,283.97,0.03,0.305,1.539,0.023686,0.1,0.03,-3.6,0.3,863.5,0.59,0.04,m,0.73,0.02,0.14,0.02,0.74,0.02,-325.0,1.0,0.8,3.4,VELA,2.0,0.71,863.5,2.7,0.1,1.0,2.0,1700.0,500.0,58.0,17.0 +33,20240304A,832.5,24,12.3,44.0,652.6,0.5,653.4,4.35,8.57,19.0,9999.0,2.51,0.12,3.5,1.3,877.0,4.0,0.5,s,0.92,0.03,0.04,0.02,0.92,0.03,489.7,0.8,2.4,5.1,1644,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +34,20240310A,902.5,25,19.1,40.0,601.8,0.2,601.76,0.855,4.19,13.493,0.127,2.23,0.07,-3.23,0.5,846.4,1.3,0.13,s,0.72,0.03,0.09,0.03,0.72,0.03,-1709.2,1.1,-4.8,7.4,VELA,2.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0,9999.0 +35,20240318A,902.5,23,13.2,119.0,256.4,0.3,256.18,0.015,0.286,0.837,9999.0,0.163,0.01,-3.32,0.005,920.5,0.128,0.005,m,0.8,0.02,0.13,0.01,0.81,0.02,-48.5,0.3,14.3,2.3,1644,1.0,0.8,919.5,4.1,0.2,1.0,1.0,4200.0,330.0,9999.0,9999.0 diff --git a/papers/Scattering/fit_scattering_width.py b/papers/Scattering/fit_scattering_width.py index af1623de..1fc58218 100644 --- a/papers/Scattering/fit_scattering_width.py +++ b/papers/Scattering/fit_scattering_width.py @@ -99,23 +99,118 @@ # CHIME values # raw were +# mu_tau, sigmatau in ln and 600 was: 2.02 ms, in log10 that's 0.3. Scaled to 1 GHz from 600 MHz is down by 0.6^4 = -0.88 so basically -0.57 +# sigma_tau at 600 was: 1.72. * log10(e) gives 0.75 + +# for width, it's 1.0 ms, 0.97, which is frequency independent, and becomes 0 + # scaled to 1GHz and log10 are # $(\mu_w,\sigma_w) = (\log_{10} 1.0 {\rm ms},0.42)$ # $(\mu_\tau,\sigma_\tau) = (\log_{10} 0.262 {\rm ms},0.75) CHIME_muw = 0. 
CHIME_sw = 0.42 -CHIME_mut = 0.3 + +CHIME_mut = 0.3 # scaled to 1 GHz is -0.58 CHIME_st = 0.75 -def main(outdir="Fitting_Outputs/"): - # does k-s test to scattering distribution + + +def plot_good_alpha(outdir,alphas,alphaerr,tauobs,tauobserr): + """ + makes diagnostic plot of good alpha values + """ + + OK1 = np.array(np.where(alphaerr < 1.)[0]) + + bins = np.linspace(-6,0,13) + plt.figure() + plt.xlabel("$\\alpha$") + plt.ylabel("$N(\\alpha)$") + plt.hist(alphas[OK1],bins=bins) + plt.tight_layout() + plt.savefig(outdir+"alphahist1.png") + plt.close() + + OK2 = np.where(np.abs(tauobserr[OK1]/tauobs[OK1])<0.1)[0] + OK3 = OK1[OK2] + + plt.figure() + plt.xlabel("$\\alpha$") + plt.ylabel("$N(\\alpha)$") + plt.hist(alphas[OK3],bins=bins) + plt.tight_layout() + plt.savefig(outdir+"alphahist3.png") + plt.close() + + plt.xlabel("$\\alpha$") + plt.ylabel("$\\sigma_{\\rm alpha}$") + plt.scatter(alphas,alphaerr) + plt.tight_layout() + plt.savefig(outdir+"alpha_err_scat.png") + plt.close() + + plt.xlabel("$\\alpha$") + plt.ylabel("$\\sigma_{\\rm alpha}$") + plt.scatter(alphas[OK3],alphaerr[OK3]) + plt.tight_layout() + plt.savefig(outdir+"alpha_err_scat3.png") + plt.close() + + + plt.xlabel("$\\tau$ [ms]") + plt.ylabel("$\\sigma_{\\rm tau}/\\tau$") + plt.xscale("log") + plt.yscale("log") + plt.scatter(tauobs,tauobserr/tauobs) + plt.tight_layout() + plt.savefig(outdir+"tau_obs_err.png") + plt.close() + +def fit_scat_width(outdir="Fitting_Outputs/",bootstrap=False,alpha=-4,doplots=False,bsalpha=False): + """ + + + Args: + outdir (string): directory to send outputs to + bootstrap (bool): add random errors to tau + alpha (float): use this as standard value of nu^\alpha, except if bsalpha is True + doplots (bool): generate publication plots + bsalpha (bool): add random variation to individual alphas + """ + + # recordss this value + mean_alpha=alpha if not os.path.exists(outdir): os.mkdir(outdir) - plot_functions(outdir=outdir) + if doplots: + plot_functions(outdir=outdir) + + 
tns,tauobs,w95,wsnr,z,snr,freq,DM,tres,tauobserr,taualpha,taualphaerr = get_data() + + + if doplots: + plot_good_alpha(outdir,taualpha,taualphaerr,tauobs,tauobserr) + - tns,tauobs,w95,wsnr,z,snr,freq,DM,tres = get_data() + # performs a bootstrap step to estimate uncertainties + if bootstrap: + tausim = tauobs + tauobserr*np.random.normal(0,1,tauobserr.size) + # resamples things which are -ve + while True: + neg = np.where(tausim < 0.)[0] + if len(neg)==0: + break + tausim[neg] = tauobs[neg]*np.random.normal(0,1,len(neg)) + tauobs = tausim + + if bsalpha: + # samples according to error for good measurements, and ~3 for poor ones + OK = np.where(taualphaerr < 1.)[0] + BAD = np.where(taualphaerr >= 1.)[0] + alpha = taualpha + taualphaerr*np.random.normal(0,1,taualpha.size) + alpha[BAD] = -np.random.normal(3,1,len(BAD)) # gets observed intrinsic widths wobs = wsnr**2 - (tauobs/0.816)**2 @@ -128,19 +223,20 @@ def main(outdir="Fitting_Outputs/"): wobs = wobs**0.5 # generates scatter plot of tau and width - plt.figure() - plt.scatter(wsnr,tauobs) - plt.xlabel("$w_{\\rm SNR}$") - plt.ylabel("$\\tau_{\\rm obs}$") - plt.xscale("log") - plt.yscale("log") - slope = 0.816 - x=np.array([1e-2,20]) - y=slope*x - plt.plot(x,y,linestyle="--",color="black") - plt.tight_layout() - plt.savefig(outdir+"scatter_w_tau.png") - plt.close() + if doplots: + plt.figure() + plt.scatter(wsnr,tauobs) + plt.xlabel("$w_{\\rm SNR}$") + plt.ylabel("$\\tau_{\\rm obs}$") + plt.xscale("log") + plt.yscale("log") + slope = 0.816 + x=np.array([1e-2,20]) + y=slope*x + plt.plot(x,y,linestyle="--",color="black") + plt.tight_layout() + plt.savefig(outdir+"scatter_w_tau.png") + plt.close() NFRB = snr.size @@ -157,9 +253,8 @@ def main(outdir="Fitting_Outputs/"): maxws[i] = wmax # scale taus to 1 GHz in host rest frame - alpha = -4 - host_tau = tauobs*(1+z)**3 * (freq/1e3)**-alpha - host_maxtaus = maxtaus * (1+z)**3 * (freq/1e3)**-alpha + host_tau = tauobs*(1+z)**(-alpha-1) * (freq/1e3)**-alpha + host_maxtaus = 
maxtaus * (1+z)**(-alpha-1) * (freq/1e3)**-alpha host_w = wobs/(1+z) host_maxw = maxws/(1+z) @@ -167,29 +262,25 @@ def main(outdir="Fitting_Outputs/"): # print(i,z[i],maxws[i],host_maxw[i]) - - - - - - # generates a table - print("Table of FRB properties for latex") - for i,name in enumerate(tns): - string=name - # detection properties - string += " & " + str(int(DM[i]+0.5)) + " & " + str(z[i])[0:6] + " & " + str(snr[i])[0:4] - string += " & " + str(freq[i])[0:6] + " & " + str(tres[i])[0:6] + " & " + str(w95[i])[0:4] - - # scattering - string += " & " + str(tauobs[i])[0:4] + " & " + str(maxtaus[i])[0:4] + " & " + str(host_tau[i])[0:4] + " & " + str(host_maxtaus[i])[0:4] - - # width - string += " & " + str(wobs[i])[0:4] + " & " + str(maxws[i])[0:4] + " & " + str(host_w[i])[0:4] + " & " + str(host_maxw[i])[0:4] - - # newline - string += " \\\\" - print(string) - print("\n\n\n\n") + if doplots: + # generates a table + print("Table of FRB properties for latex") + for i,name in enumerate(tns): + string=name + # detection properties + string += " & " + str(int(DM[i]+0.5)) + " & " + str(z[i])[0:6] + " & " + str(snr[i])[0:4] + string += " & " + str(freq[i])[0:6] + " & " + str(tres[i])[0:6] + " & " + str(w95[i])[0:4] + + # scattering + string += " & " + str(tauobs[i])[0:4] + " & " + str(maxtaus[i])[0:4] + " & " + str(host_tau[i])[0:4] + " & " + str(host_maxtaus[i])[0:4] + + # width + string += " & " + str(wobs[i])[0:4] + " & " + str(maxws[i])[0:4] + " & " + str(host_w[i])[0:4] + " & " + str(host_maxw[i])[0:4] + + # newline + string += " \\\\" + print(string) + print("\n\n\n\n") Nbins=11 bins = np.logspace(-3.,2.,Nbins) @@ -200,153 +291,158 @@ def main(outdir="Fitting_Outputs/"): ############## Observed Histogram ################# - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1 = plt.hist(tauobs,bins=bins,label="Observed",alpha=0.5) - - # makes a 
function of completeness - xvals,yvals = make_completeness_plot(maxtaus) - - tau_comp = get_completeness(tauobs,xvals,yvals) - - l2 = plt.hist(tauobs,bins=bins,weights = 1./tau_comp,label="Observed",alpha=0.5) - - - ax2 = ax1.twinx() - l3 = ax2.plot(xvals,yvals,label="Completeness") - plt.ylim(0,1) - plt.xlim(1e-2,1e3) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) - plt.xlabel("$\\tau_{\\rm obs}$ [ms]") - plt.ylabel("Number of FRBs") - plt.text(2e-3,8,"(a)",fontsize=18) - plt.tight_layout() - plt.savefig(outdir+"tau_observed_histogram.png") - plt.close() - + # only plot this if doplotsping! + if doplots: + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1 = plt.hist(tauobs,bins=bins,label="Observed",alpha=0.5) - - ############## 1 GHz Rest-frame Histogram ################# - - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) - - # makes a function of completeness - xvals,yvals = make_completeness_plot(host_maxtaus) - - # get completeness at points of measurement - tau_comp = get_completeness(host_tau,xvals,yvals) - - l2 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) - - - ax2 = ax1.twinx() - l3 = ax2.plot(xvals,yvals,label="Completeness") - use_this_color = l3[0].get_color() - - plt.ylim(0,1) - plt.xlim(1e-2) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Number of FRBs") - - plt.text(2e-3,8,"(b)",fontsize=18) - plt.tight_layout() - plt.savefig(outdir+"tau_host_histogram.png") - - 
#### creates a copy of the above, for paper purposes. Does this three times! Once - # for "everything", once for intrinsic, once for observed - - # "everything" figures # - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1v2=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1v2 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) - - # makes a function of completeness - xvals,yvals = make_completeness_plot(host_maxtaus) - - # get completeness at points of measurement - tau_comp = get_completeness(host_tau,xvals,yvals) - - l2v2 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) - - - ax2v2 = ax1v2.twinx() - l3v2 = ax2v2.plot(xvals,yvals,label="Completeness") - - plt.ylim(0,1) - plt.xlim(1e-2) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Number of FRBs") - # keeps open for later plotting - don't close this here - - - # "observed" figures # - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1v3=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1v3 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) - - ax2v3 = ax1v3.twinx() - l3v3 = ax2v3.plot(xvals,yvals,label="Completeness") - - plt.ylim(0,1) - plt.xlim(1e-2) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Number of FRBs") - # keeps open for later plotting - don't close this here - - # "intrinsic" figures # - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1v4=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l2v4 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) - - - ax2v4 = ax1v4.twinx() - l3v4 = ax2v4.plot(xvals,yvals,label="Completeness") - - plt.ylim(0,1) - plt.xlim(1e-2) - plt.ylabel("Completeness") - plt.sca(ax1) - 
plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Intrinsic number of FRBs") - # keeps open for later plotting - don't close this here + # makes a function of completeness + xvals,yvals = make_completeness_plot(maxtaus) + + tau_comp = get_completeness(tauobs,xvals,yvals) + + l2 = plt.hist(tauobs,bins=bins,weights = 1./tau_comp,label="Observed",alpha=0.5) + + + ax2 = ax1.twinx() + l3 = ax2.plot(xvals,yvals,label="Completeness") + plt.ylim(0,1) + plt.xlim(1e-2,1e3) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) + plt.xlabel("$\\tau_{\\rm obs}$ [ms]") + plt.ylabel("Number of FRBs") + plt.text(2e-3,8,"(a)",fontsize=18) + plt.tight_layout() + plt.savefig(outdir+"tau_observed_histogram.png") + plt.close() + + + + ############## 1 GHz Rest-frame Histogram ################# + + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) + + # makes a function of completeness + xvals,yvals = make_completeness_plot(host_maxtaus) + + # get completeness at points of measurement + tau_comp = get_completeness(host_tau,xvals,yvals) + + l2 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) + + + ax2 = ax1.twinx() + l3 = ax2.plot(xvals,yvals,label="Completeness") + use_this_color = l3[0].get_color() + + plt.ylim(0,1) + plt.xlim(1e-2) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Number of FRBs") + + plt.text(2e-3,8,"(b)",fontsize=18) + plt.tight_layout() + plt.savefig(outdir+"tau_host_histogram.png") + + #### creates a copy of the above, for paper purposes. Does this three times! 
Once + # for "everything", once for intrinsic, once for observed + + # "everything" figures # + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1v2=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1v2 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) + + # makes a function of completeness + xvals,yvals = make_completeness_plot(host_maxtaus) + + # get completeness at points of measurement + tau_comp = get_completeness(host_tau,xvals,yvals) + + l2v2 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) + + + ax2v2 = ax1v2.twinx() + l3v2 = ax2v2.plot(xvals,yvals,label="Completeness") + + plt.ylim(0,1) + plt.xlim(1e-2) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Number of FRBs") + # keeps open for later plotting - don't close this here + + + # "observed" figures # + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1v3=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1v3 = plt.hist(host_tau,bins=bins,label="Observed",alpha=0.5) + + ax2v3 = ax1v3.twinx() + l3v3 = ax2v3.plot(xvals,yvals,label="Completeness") + + plt.ylim(0,1) + plt.xlim(1e-2) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Number of FRBs") + # keeps open for later plotting - don't close this here + + # "intrinsic" figures # + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1v4=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l2v4 = plt.hist(host_tau,bins=bins,weights = 1./tau_comp,label="Corrected",alpha=0.5) + + + ax2v4 = ax1v4.twinx() + l3v4 = ax2v4.plot(xvals,yvals,label="Completeness") + + plt.ylim(0,1) + plt.xlim(1e-2) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Intrinsic number 
of FRBs") + # keeps open for later plotting - don't close this here ####################################### TAU - CDF and fitting ################################ - print("\n\n KS test evaluation for tau \n") + if doplots: + print("\n\n KS test evaluation for tau \n") # amplitude, mean, and std dev of true distribution + xvals,yvals = make_completeness_plot(host_maxtaus) + ksbest = [] # begins minimisation for KS statistic for ifunc in np.arange(NFUNC): @@ -358,9 +454,10 @@ def main(outdir="Fitting_Outputs/"): psub1 = get_ks_stat(result.x,*args,plot=False) ksbest.append(result.x) #Best-fitting parameters are [0.85909445 1.45509687] with p-value 0.9202915513749959 - print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting parameters are ",result.x," with p-value ",1.-result.fun) + if doplots: + print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting parameters are ",result.x," with p-value ",1.-result.fun) - + # adds an extra couple of points here. For.. reasons? xtemp = xvals[::2] ytemp = yvals[::2] s = xtemp.size @@ -368,17 +465,17 @@ def main(outdir="Fitting_Outputs/"): yt = np.zeros([s+2]) xt[0] = xtemp[0] yt[0] = ytemp[0] - xt[1] = 1. - yt[1] = 1. xt[2:-1] = xtemp[1:] yt[2:-1] = ytemp[1:] + xt[1] = 0.75*xt[2] + yt[1] = 1. xt[-1] = 1e5 yt[-1] = 0. cspline = sp.interpolate.make_interp_spline(np.log10(xt), yt,k=1) - # get a spline interpolation of completeness. Should be removed from function! + # get a spline interpolation of completeness. Should be removed from function! 
# do a test plot of the spline - if True: + if doplots: plt.figure() plt.plot(np.logspace(-5,5,101),cspline(np.linspace(-5,5,101))) plt.plot(xvals,yvals) @@ -390,9 +487,11 @@ def main(outdir="Fitting_Outputs/"): make_cdf_plot(ksbest,host_tau,xvals,yvals,outdir+"bestfit_ks_scat_cumulative.png",cspline) ############################################### TAU - likelihood analysis ################################################# - print("\n\n Max Likelihood Calculation for tau\n") + if doplots: + print("\n\n Max Likelihood Calculation for tau\n") xbest=[] llbests = [] + pbests=[] for ifunc in np.arange(NFUNC): args = (host_tau,cspline,ifunc) x0=ARGS0[ifunc] @@ -403,135 +502,145 @@ def main(outdir="Fitting_Outputs/"): # llbest returns negative ll llbest = get_ll_stat(result.x,host_tau,cspline,ifunc) * -1 llbests.append(llbest) - - print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting log-likelihood parameters are ",result.x," with likelihood ",-result.fun) - print(" , BIC is ",2*np.log(host_tau.size) - len(x0)*llbest) + pbests.append(-result.fun) + if doplots: + print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting log-likelihood parameters are ",result.x," with likelihood ",-result.fun) + print(" , BIC is ",2*np.log(host_tau.size) - len(x0)*llbest) if ifunc == 0: - llCHIME = get_ll_stat([CHIME_mut,CHIME_st],host_tau,cspline,ifunc) * -1 - print("Compare with CHIME ",llCHIME) - - print("\n\nLATEX TABLE") - for ifunc in np.arange(NFUNC): - string = FNAMES[ifunc] + " & $" + latextau[ifunc] + f"$ & ${xbest[ifunc][0]:.2f}" - for iarg, arg in enumerate(xbest[ifunc]): - if iarg==0: - continue - string += f", {xbest[ifunc][iarg]:.2f}" - string += f" $ & {llbests[ifunc]:.2f} \\\\" - print(string) - print("\n\n\n\n") - - make_cdf_plot(xbest,host_tau,xvals,yvals,outdir+"bestfit_ll_scat_cumulative.png",cspline) + taullCHIME = get_ll_stat([CHIME_mut - np.log10(0.6**mean_alpha),CHIME_st],host_tau,cspline,ifunc) * -1 + if doplots: + print("Compare with CHIME ",taullCHIME) + 
taullCHIME -= llbests[3] + + tauxbest = xbest + taullbests = llbests + taupbests = pbests + + if doplots: + print("\n\nLATEX TABLE") + for ifunc in np.arange(NFUNC): + string = FNAMES[ifunc] + " & $" + latextau[ifunc] + f"$ & ${xbest[ifunc][0]:.2f}" + for iarg, arg in enumerate(xbest[ifunc]): + if iarg==0: + continue + string += f", {xbest[ifunc][iarg]:.2f}" + string += f" $ & {llbests[ifunc]:.2f} \\\\" + print(string) + print("\n\n\n\n") + + if doplots: + make_cdf_plot(xbest,host_tau,xvals,yvals,outdir+"bestfit_ll_scat_cumulative.png",cspline) ######## does plot with all fits added ######## - - plt.sca(ax1) - NFRB=host_tau.size - handles=[l1[2],l3[0],l2[2]] - labels=["Observed","Completeness","Corrected$"] - styles=["-.","--","--",":"] - for i in np.arange(NFUNC): - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i])#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) - plt.xscale("log") - plt.xlim(1e-2,1e3) - plt.legend() - - plt.legend(handles=handles,labels=labels,fontsize=6) #fontsize=12) - - - plt.savefig(outdir+"tau_host_histogram_fits.png") - plt.close() + if doplots: + plt.sca(ax1) + NFRB=host_tau.size + handles=[l1[2],l3[0],l2[2]] + labels=["Observed","Completeness","Corrected$"] + styles=["-.","--","--",":"] + for i in np.arange(NFUNC): + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i])#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) + plt.xscale("log") + plt.xlim(1e-2,1e3) + 
plt.legend() + + plt.legend(handles=handles,labels=labels,fontsize=6) #fontsize=12) + + + plt.savefig(outdir+"tau_host_histogram_fits.png") + plt.close() ######## plots for paper ######## - #"everything" - plt.sca(ax1v2) - NFRB=host_tau.size - handles=[l1v2[2],l3v2[0],l2v2[2]] - labels=["Observed","Completeness","Corrected"] - for i in [0,2,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i])#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) - plt.xscale("log") - plt.xlim(1e-2,1e3) - - plt.text(1e-3,8,"(b)",fontsize=18) - plt.legend() - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"paper_tau_host_histogram_fits.png") - plt.close() - - #"observed" - plt.sca(ax1v3) - NFRB=host_tau.size - handles=[l1v3[2],l3v3[0]] - labels=["Observed","Completeness"] - for i in [0,2,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - #l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - - plt.xscale("log") - plt.xlim(1e-2,1e3) - - plt.text(1e-3,8,"(a)",fontsize=18) - plt.legend() - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Observed number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"observed_paper_tau_host_histogram_fits.png") - plt.close() - - # "intrinsic" - 
plt.sca(ax1v4) - NFRB=host_tau.size - handles=[l2v4[2],l3v4[0]] - labels=["Adjusted","Completeness"] - for i in [0,2,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - plt.xscale("log") - plt.xlim(1e-2,1e3) - - plt.text(1e-3,8,"(b)",fontsize=18) - plt.legend() - plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") - plt.ylabel("Intrinsic number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"intrinsic_paper_tau_host_histogram_fits.png") - plt.close() + if doplots: + #"everything" + plt.sca(ax1v2) + NFRB=host_tau.size + handles=[l1v2[2],l3v2[0],l2v2[2]] + labels=["Observed","Completeness","Corrected"] + for i in [0,2,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i])#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) + plt.xscale("log") + plt.xlim(1e-2,1e3) + + plt.text(1e-3,8,"(b)",fontsize=18) + plt.legend() + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"paper_tau_host_histogram_fits.png") + plt.close() + + #"observed" + plt.sca(ax1v3) + NFRB=host_tau.size + handles=[l1v3[2],l3v3[0]] + labels=["Observed","Completeness"] + for i in [0,2,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): + plotnorm = NFRB * 
(np.log10(bins[1])-np.log10(bins[0])) + #l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + + plt.xscale("log") + plt.xlim(1e-2,1e3) + + plt.text(1e-3,8,"(a)",fontsize=18) + plt.legend() + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Observed number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"observed_paper_tau_host_histogram_fits.png") + plt.close() + + # "intrinsic" + plt.sca(ax1v4) + NFRB=host_tau.size + handles=[l2v4[2],l3v4[0]] + labels=["Adjusted","Completeness"] + for i in [0,2,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + plt.xscale("log") + plt.xlim(1e-2,1e3) + + plt.text(1e-3,8,"(b)",fontsize=18) + plt.legend() + plt.xlabel("$\\tau_{\\rm host, 1\,GHz}$ [ms]") + plt.ylabel("Intrinsic number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"intrinsic_paper_tau_host_histogram_fits.png") + plt.close() ############################################# TAU - bayes factor ####################################### # priors @@ -541,10 +650,12 @@ def main(outdir="Fitting_Outputs/"): # min mean/min/max: size of distribution # NOTE: Bayes factor is P(data|model1)/P(data|model2) - print("\n\n Bayes Factor Calculation\n") + if doplots: + print("\n\n Bayes Factor Calculation\n") for ifunc in np.arange(NFUNC): if True: - print("skipping Bayes factor calculation for Tau, remove this line to re-run") + if doplots: + print("skipping Bayes factor calculation for Tau, remove this line to re-run") 
#FUNCTION 0 has likelihood sum 2.6815961887322472e-21 now compute Bayes factor! #FUNCTION 1 has likelihood sum 1.4462729739641349e-15 now compute Bayes factor! #FUNCTION 2 has likelihood sum 5.364450880196842e-16 now compute Bayes factor! @@ -552,7 +663,7 @@ def main(outdir="Fitting_Outputs/"): #FUNCTION 4 has likelihood sum 7.269113417017299e-16 now compute Bayes factor! #FUNCTION 5 has likelihood sum 6.248172055823414e-16 now compute Bayes factor! #FUNCTION 6 has likelihood sum 6.339647378300337e-16 now compute Bayes factor! - continue + break llsum=0. N1=100 @@ -589,7 +700,8 @@ def main(outdir="Fitting_Outputs/"): if (ifunc == 2 or ifunc == 4 or ifunc==5 or ifunc==6): llsum *= 2 #because the parameter space is actually half that calculated above - print("FUNCTION ",ifunc,",",FNAMES[ifunc]," has likelihood sum ",llsum, " now compute Bayes factor!") + if doplots: + print("FUNCTION ",ifunc,",",FNAMES[ifunc]," has likelihood sum ",llsum, " now compute Bayes factor!") @@ -598,141 +710,147 @@ def main(outdir="Fitting_Outputs/"): ############################################## WIDTH ############################################### ###################################################################################################### - print("\n\n\n\n######### WIDTH #########\n") + if doplots: + print("\n\n\n\n######### WIDTH #########\n") ######################################### Observed Histogram ############################################ - plt.figure() - ax1=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1 = plt.hist(wobs,bins=bins,label="Observed",alpha=0.5) - - # makes a function of completeness - wxvals,wyvals = make_completeness_plot(maxws) - - wi_comp = get_completeness(wobs,wxvals,wyvals) - - l2 = plt.hist(wobs,bins=bins,weights = 1./wi_comp,label="Observed",alpha=0.5) - - - ax2 = ax1.twinx() - l3 = ax2.plot(wxvals,wyvals,label="Completeness") - plt.ylim(0,1) - plt.xlim(5e-3,1e2) - plt.ylabel("Completeness") - plt.sca(ax1) - 
plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected $w_i$"],fontsize=12) - plt.xlabel("$w_i$ [ms]") - plt.ylabel("Number of FRBs") - - plt.text(1e-3,8,"(a)",fontsize=18) - plt.tight_layout() - plt.savefig(outdir+"w_observed_histogram.png") - plt.close() - - ################################ 1 GHz Rest-frame Histogram ########################## - - plt.figure() - ax1=plt.gca() - plt.xscale("log") - plt.ylim(0,8) - l1 = plt.hist(host_w,bins=bins,label="Host",alpha=0.5) - - # makes a function of completeness - wxvals,wyvals = make_completeness_plot(host_maxw) - - # get completeness at points of measurement - w_comp = get_completeness(host_w,wxvals,wyvals) - - l2 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Observed",alpha=0.5) - - - ax2 = ax1.twinx() - l3 = ax2.plot(wxvals,wyvals,label="Completeness") - - plt.ylim(0,1) - plt.xlim(5e-3,1e2) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) - plt.xlabel("$w_{i,\\rm host}$ [ms]") - plt.ylabel("Number of FRBs") - plt.tight_layout() - plt.savefig(outdir+"w_host_histogram.png") - - # keeps open for plotting later - - #### new plot, for paper - just a copy of the above #### - - #"everything" - plt.figure() - plt.xscale("log") - plt.ylim(0,8) - l1v2 = plt.hist(host_w,bins=bins,label="Host",alpha=0.5) + # only plot if doplotsping + if doplots: + plt.figure() + ax1=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1 = plt.hist(wobs,bins=bins,label="Observed",alpha=0.5) + + # makes a function of completeness + wxvals,wyvals = make_completeness_plot(maxws) + + wi_comp = get_completeness(wobs,wxvals,wyvals) + + l2 = plt.hist(wobs,bins=bins,weights = 1./wi_comp,label="Observed",alpha=0.5) + + + ax2 = ax1.twinx() + l3 = ax2.plot(wxvals,wyvals,label="Completeness") + plt.ylim(0,1) + plt.xlim(5e-3,1e2) + plt.ylabel("Completeness") + plt.sca(ax1) + 
plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected $w_i$"],fontsize=12) + plt.xlabel("$w_i$ [ms]") + plt.ylabel("Number of FRBs") + + plt.text(1e-3,8,"(a)",fontsize=18) + plt.tight_layout() + plt.savefig(outdir+"w_observed_histogram.png") + plt.close() + + ################################ 1 GHz Rest-frame Histogram ########################## + + plt.figure() + ax1=plt.gca() + plt.xscale("log") + plt.ylim(0,8) + l1 = plt.hist(host_w,bins=bins,label="Host",alpha=0.5) + + # makes a function of completeness + wxvals,wyvals = make_completeness_plot(host_maxw) + + # get completeness at points of measurement + w_comp = get_completeness(host_w,wxvals,wyvals) + + l2 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Observed",alpha=0.5) + + + ax2 = ax1.twinx() + l3 = ax2.plot(wxvals,wyvals,label="Completeness") + + plt.ylim(0,1) + plt.xlim(5e-3,1e2) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.legend(handles=[l1[2],l3[0],l2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) + plt.xlabel("$w_{i,\\rm host}$ [ms]") + plt.ylabel("Number of FRBs") + plt.tight_layout() + plt.savefig(outdir+"w_host_histogram.png") + + # keeps open for plotting later + + #### new plot, for paper - just a copy of the above #### + + #"everything" + plt.figure() + plt.xscale("log") + plt.ylim(0,8) + l1v2 = plt.hist(host_w,bins=bins,label="Host",alpha=0.5) + + ax1v2=plt.gca() + # makes a function of completeness + wxvals,wyvals = make_completeness_plot(host_maxw) + + # get completeness at points of measurement + w_comp = get_completeness(host_w,wxvals,wyvals) + + l2v2 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Observed",alpha=0.5) + + ax2v2 = ax1v2.twinx() + l3v2 = ax2v2.plot(wxvals,wyvals,label="Completeness")#,color=use_this_color) + + plt.ylim(0,1) + plt.ylabel("Completeness") + plt.sca(ax1v2) + plt.legend(handles=[l1v2[2],l3v2[0],l2v2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) + plt.xlabel("$w_{i,\\rm host}$ 
[ms]") + plt.ylabel("Number of FRBs") + + + # "observed" figures # + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1v3=plt.gca() + plt.xscale("log") + plt.ylim(0,11) + l1v3 = plt.hist(host_w,bins=bins,label="Observed",alpha=0.5) + + ax2v3 = ax1v3.twinx() + l3v3 = ax2v3.plot(wxvals,wyvals,label="Completeness") + + plt.ylim(0,1) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.xlabel("$w_{i,\\rm host}$ [ms]") + plt.ylabel("Number of FRBs") + # keeps open for later plotting - don't close this here + + # "intrinsic" figures # + plt.figure() + #obshist,bins = np.histogram(tauobs,bins=bins) + #hosthist,bins = np.histogram(host_tau,bins=bins) + ax1v4=plt.gca() + plt.xscale("log") + plt.ylim(0,11) + l2v4 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Corrected",alpha=0.5) + + + ax2v4 = ax1v4.twinx() + l3v4 = ax2v4.plot(wxvals,wyvals,label="Completeness") + + plt.ylim(0,1) + plt.ylabel("Completeness") + plt.sca(ax1) + plt.xlabel("$w_{\\rm host}$ [ms]") + plt.ylabel("Intrinsic number of FRBs") + # keeps open for later plotting - don't close this here - ax1v2=plt.gca() + ####################### W - likelihood maximisation ################# # makes a function of completeness wxvals,wyvals = make_completeness_plot(host_maxw) - - # get completeness at points of measurement - w_comp = get_completeness(host_w,wxvals,wyvals) - - l2v2 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Observed",alpha=0.5) - - ax2v2 = ax1v2.twinx() - l3v2 = ax2v2.plot(wxvals,wyvals,label="Completeness")#,color=use_this_color) - - plt.ylim(0,1) - plt.ylabel("Completeness") - plt.sca(ax1v2) - plt.legend(handles=[l1v2[2],l3v2[0],l2v2[2]],labels=["Observed","Completeness","Corrected"],fontsize=12) - plt.xlabel("$w_{i,\\rm host}$ [ms]") - plt.ylabel("Number of FRBs") - - - # "observed" figures # - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - 
ax1v3=plt.gca() - plt.xscale("log") - plt.ylim(0,11) - l1v3 = plt.hist(host_w,bins=bins,label="Observed",alpha=0.5) - - ax2v3 = ax1v3.twinx() - l3v3 = ax2v3.plot(wxvals,wyvals,label="Completeness") - - plt.ylim(0,1) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.xlabel("$w_{i,\\rm host}$ [ms]") - plt.ylabel("Number of FRBs") - # keeps open for later plotting - don't close this here - - # "intrinsic" figures # - plt.figure() - #obshist,bins = np.histogram(tauobs,bins=bins) - #hosthist,bins = np.histogram(host_tau,bins=bins) - ax1v4=plt.gca() - plt.xscale("log") - plt.ylim(0,11) - l2v4 = plt.hist(host_w,bins=bins,weights = 1./w_comp,label="Corrected",alpha=0.5) - - - ax2v4 = ax1v4.twinx() - l3v4 = ax2v4.plot(wxvals,wyvals,label="Completeness") - - plt.ylim(0,1) - plt.ylabel("Completeness") - plt.sca(ax1) - plt.xlabel("$w_{\\rm host}$ [ms]") - plt.ylabel("Intrinsic number of FRBs") - # keeps open for later plotting - don't close this here - - ####################### W - likelihood maximisation ################# - + ####################################### Width - CDF and fitting ################################ - print("\n\n KS test evaluation for width \n") + if doplots: + print("\n\n KS test evaluation for width \n") # amplitude, mean, and std dev of true distribution ksbest = [] @@ -744,11 +862,13 @@ def main(outdir="Fitting_Outputs/"): result = sp.optimize.minimize(get_ks_stat,x0=x0,args=args,method = 'Nelder-Mead') psub1 = get_ks_stat(result.x,*args,plot=False) ksbest.append(result.x) - print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting parameters are ",result.x," with p-value ",-result.fun) + if doplots: + print("FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting parameters are ",result.x," with p-value ",-result.fun) - print("\n\n Maximum likelihood for width \n") + if doplots: + print("\n\n Maximum likelihood for width \n") ### makes temporary values for completeness xtemp = wxvals[::2] ytemp = wyvals[::2] @@ -757,16 +877,16 @@ def 
main(outdir="Fitting_Outputs/"): yt = np.zeros([s+2]) xt[0] = xtemp[0] yt[0] = ytemp[0] - xt[1] = 0.1 - yt[1] = 1. xt[2:-1] = xtemp[1:] yt[2:-1] = ytemp[1:] + xt[1] = xt[2]*0.75 + yt[1] = 1. xt[-1] = 1e5 yt[-1] = 0. cspline = sp.interpolate.make_interp_spline(np.log10(xt), yt,k=1) # do a test plot of the spline? - if True: + if doplots: plt.figure() plt.plot(np.logspace(-5,5,101),cspline(np.linspace(-5,5,101))) plt.plot(xvals,yvals) @@ -774,14 +894,17 @@ def main(outdir="Fitting_Outputs/"): plt.savefig(outdir+"width_spline_example.png") plt.close() # make a cdf plot of the best fits - make_cdf_plot(ksbest,host_tau,xvals,yvals,outdir+"bestfit_ks_width_cumulative.png",cspline) + if doplots: + make_cdf_plot(ksbest,host_tau,xvals,yvals,outdir+"bestfit_ks_width_cumulative.png",cspline) ####################################### Width - max likelihood ################################ - print("\n\n Likelhiood maximasation for width \n") + if doplots: + print("\n\n Likelhiood maximasation for width \n") # amplitude, mean, and std dev of true distribution xbest=[] llbests = [] + pvals = [] # iterate over functions to calculate max likelihood for ifunc in np.arange(NFUNC): @@ -792,136 +915,138 @@ def main(outdir="Fitting_Outputs/"): llbests.append(llbest) #psub1 = get_ks_stat(result.x,*args,plot=True) xbest.append(result.x) - print("width FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting log-likelihood parameters are ",result.x," with p-value ",1.-result.fun) + pvals.append(-result.fun) + if doplots: + print("width FUNCTION ",ifunc,",",FNAMES[ifunc]," Best-fitting log-likelihood parameters are ",result.x," with p-value ",-result.fun) if ifunc == 0: - llCHIME = get_ll_stat([CHIME_muw,CHIME_sw],host_tau,cspline,ifunc) * -1 - print("Compare with CHIME ",llCHIME) - - - print("\n\nLATEX TABLE") - for ifunc in np.arange(NFUNC): - string = FNAMES[ifunc] + " & $" + latexw[ifunc] + f"$ & ${xbest[ifunc][0]:.2f}" - for iarg, arg in enumerate(xbest[ifunc]): - if iarg==0: - continue - 
string += f", {xbest[ifunc][iarg]:.2f}" - string += f" $ & {llbests[ifunc]:.2f} \\\\" - print(string) - print("\n\n\n\n") - - - - make_cdf_plot(xbest,host_w,wxvals,wyvals,outdir+"bestfit_ll_width_cumulative.png",cspline,width=True) + wllCHIME = get_ll_stat([CHIME_muw,CHIME_sw],host_tau,cspline,ifunc) * -1 + if doplots: + print("Compare with CHIME ",wllCHIME) + wllCHIME -= llbests[3] + if doplots: + print("\n\nLATEX TABLE") + for ifunc in np.arange(NFUNC): + string = FNAMES[ifunc] + " & $" + latexw[ifunc] + f"$ & ${xbest[ifunc][0]:.2f}" + for iarg, arg in enumerate(xbest[ifunc]): + if iarg==0: + continue + string += f", {xbest[ifunc][iarg]:.2f}" + string += f" $ & {llbests[ifunc]:.2f} \\\\" + print(string) + print("\n\n\n\n") + + if doplots: + make_cdf_plot(xbest,host_w,wxvals,wyvals,outdir+"bestfit_ll_width_cumulative.png",cspline,width=True) ### does plot with fits added ### - - plt.sca(ax1) - NFRB=host_tau.size - handles=[l1[2],l3[0],l2[2]] - labels=["$w_{\\rm host}$","Completeness","Corrected $w_{\\rm host}$"] - for i in np.arange(NFUNC): - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i])#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) - plt.xscale("log") - plt.xlim(1e-4,1e3) - plt.legend() - - plt.legend(handles=handles,labels=labels,fontsize=6) #fontsize=12) - plt.savefig(outdir+"w_host_histogram_fits.png") - plt.close() - - #### for paper #### - - - plt.sca(ax1v2) - plt.ylim(0,12) - NFRB=host_w.size - handles=[l1v2[2],l3v2[0],l2v2[2]] - labels=["Observed","Completeness","Corrected"] - for i in [0,1,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i])#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) 
- l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) - plt.xscale("log") - - plt.text(1e-3,12,"(b)",fontsize=18) - plt.xlim(5e-3,1e2) - plt.legend() - plt.xlabel("$w_{\\rm host}$ [ms]") - plt.ylabel("Number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=12) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"paper_w_host_histogram_fits.png") - plt.close() - - - #"observed" - plt.sca(ax1v3) - NFRB=host_tau.size - handles=[l1v3[2],l3v3[0]] - labels=["Observed","Completeness"] - for i in [0,1,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - #l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) - - xs,ys = function_wrapper(i,xbest[i],cspline=cspline) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - - plt.xscale("log") - plt.xlim(5e-3,1e2) - - plt.text(1e-3,10.5,"(a)",fontsize=18) - plt.legend() - plt.xlabel("$w_{\\rm host}$ [ms]") - plt.ylabel("Observed number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"observed_paper_width_host_histogram_fits.png") - plt.close() - - # "intrinsic" - plt.sca(ax1v4) - NFRB=host_tau.size - handles=[l2v4[2],l3v4[0]] - labels=["Adjusted","Completeness"] - - for i in [0,1,3]: - print("plotting function ",i," with xbest ",xbest[i]) - xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): - plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) - l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) - handles.append(l[0]) - labels.append(FNAMES[i]) - plt.xscale("log") - plt.xlim(5e-3,1e2) - - plt.text(1e-3,10.5,"(b)",fontsize=18) - plt.legend() - 
plt.xlabel("$w_{\\rm host}$ [ms]") - plt.ylabel("Intrinsic number of FRBs") - plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) - - plt.tight_layout() - plt.savefig(outdir+"intrinsic_paper_width_host_histogram_fits.png") - plt.close() + if doplots: + plt.sca(ax1) + NFRB=host_tau.size + handles=[l1[2],l3[0],l2[2]] + labels=["$w_{\\rm host}$","Completeness","Corrected $w_{\\rm host}$"] + for i in np.arange(NFUNC): + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i])#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) + plt.xscale("log") + plt.xlim(1e-4,1e3) + plt.legend() + + plt.legend(handles=handles,labels=labels,fontsize=6) #fontsize=12) + plt.savefig(outdir+"w_host_histogram_fits.png") + plt.close() + + #### for paper #### + + + plt.sca(ax1v2) + plt.ylim(0,12) + NFRB=host_w.size + handles=[l1v2[2],l3v2[0],l2v2[2]] + labels=["Observed","Completeness","Corrected"] + for i in [0,1,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i])#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + plt.plot(xs,ys*plotnorm,linestyle=":",color=plt.gca().lines[-1].get_color()) + plt.xscale("log") + + plt.text(1e-3,12,"(b)",fontsize=18) + plt.xlim(5e-3,1e2) + plt.legend() + plt.xlabel("$w_{\\rm host}$ [ms]") + plt.ylabel("Number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=12) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"paper_w_host_histogram_fits.png") + plt.close() + + + #"observed" + plt.sca(ax1v3) + NFRB=host_tau.size + 
handles=[l1v3[2],l3v3[0]] + labels=["Observed","Completeness"] + for i in [0,1,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + #l=plt.plot(xs,ys*plotnorm,label=FNAMES[i]) + + xs,ys = function_wrapper(i,xbest[i],cspline=cspline) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + + plt.xscale("log") + plt.xlim(5e-3,1e2) + + plt.text(1e-3,10.5,"(a)",fontsize=18) + plt.legend() + plt.xlabel("$w_{\\rm host}$ [ms]") + plt.ylabel("Observed number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"observed_paper_width_host_histogram_fits.png") + plt.close() + + # "intrinsic" + plt.sca(ax1v4) + NFRB=host_tau.size + handles=[l2v4[2],l3v4[0]] + labels=["Adjusted","Completeness"] + + for i in [0,1,3]: + print("plotting function ",i," with xbest ",xbest[i]) + xs,ys = function_wrapper(i,xbest[i],logxmax=2)#cspline=None): + plotnorm = NFRB * (np.log10(bins[1])-np.log10(bins[0])) + l=plt.plot(xs,ys*plotnorm,label=FNAMES[i],linestyle=styles[i]) + handles.append(l[0]) + labels.append(FNAMES[i]) + plt.xscale("log") + plt.xlim(5e-3,1e2) + + plt.text(1e-3,10.5,"(b)",fontsize=18) + plt.legend() + plt.xlabel("$w_{\\rm host}$ [ms]") + plt.ylabel("Intrinsic number of FRBs") + plt.legend(handles=handles,labels=labels,fontsize=10) #fontsize=12) + + plt.tight_layout() + plt.savefig(outdir+"intrinsic_paper_width_host_histogram_fits.png") + plt.close() ############################################# WIDTH - bayes factor ####################################### @@ -932,10 +1057,12 @@ def main(outdir="Fitting_Outputs/"): # min mean/min/max: size of distribution # NOTE: Bayes factor is P(data|model1)/P(data|model2) - print("\n\n Bayes Factor Calculation\n") + if doplots: + print("\n\n Bayes Factor Calculation\n") for ifunc in 
np.arange(NFUNC): if True: - print("skipping Bayes factor calculation for width, remove this line to re-run") + if doplots: + print("skipping Bayes factor calculation for width, remove this line to re-run") #FUNCTION 0 , lognormal has likelihood sum 4.287511548315901e-15 now compute Bayes factor! #FUNCTION 1 , half-lognormal has likelihood sum 1.3919340351669428e-12 now compute Bayes factor! #FUNCTION 2 , boxcar has likelihood sum 3.1091474793575766e-13 now compute Bayes factor! @@ -943,7 +1070,7 @@ def main(outdir="Fitting_Outputs/"): #FUNCTION 4 , smooth boxcar has likelihood sum 8.176332379444694e-13 now compute Bayes factor! #FUNCTION 5 , upper sb has likelihood sum 3.595417631395158e-13 now compute Bayes factor! #FUNCTION 6 , lower sb has likelihood sum 6.806166849685634e-13 now compute Bayes factor! - continue + break llsum=0. N1=100 @@ -980,9 +1107,11 @@ def main(outdir="Fitting_Outputs/"): if (ifunc == 2 or ifunc == 4 or ifunc==5 or ifunc==6): llsum *= 2 #because the parameter space is actually half that calculated above - print("FUNCTION ",ifunc,",",FNAMES[ifunc]," has likelihood sum ",llsum, " now compute Bayes factor!") - + if doplots: + print("FUNCTION ",ifunc,",",FNAMES[ifunc]," has likelihood sum ",llsum, " now compute Bayes factor!") + # returns found values + return tauxbest, taullbests, taupbests, xbest, llbests, pvals,taullCHIME,wllCHIME def plot_functions(outdir=""): """ @@ -1029,9 +1158,6 @@ def plot_functions(outdir=""): plt.plot(xs,7+ys/np.max(ys)-1.1*ifunc,label=FNAMES[ifunc],linestyle=styles[ifunc%4]) plt.text(xlabels[ifunc],7.5-1.1*ifunc,FNAMES[ifunc],color=plt.gca().lines[-1].get_color()) - - - plt.xlim(1e-4,1e4) plt.xlabel("t [ms]") plt.xscale("log") @@ -1406,25 +1532,32 @@ def get_data(): ERR=9999. 
tns = dataframe.TNS tauobs = dataframe.TauObs + tauobserr = dataframe.TauObsErr w95 = dataframe.W95 wsnr = dataframe.Wsnr z = dataframe.Z snr = dataframe.SNdet freq = dataframe.NUTau DM = dataframe.DM + alpha = dataframe.TauAlpha + alphaerr = dataframe.TauAlphaErr - # check which FRBs do not have nay errors in all columns + # check which FRBs do not have any errors in all columns OK = getOK([tns,tauobs,w95,wsnr,z,snr,freq,DM]) tns = tns[OK] tauobs= tauobs[OK] + tauobserr = tauobserr[OK] w95 = w95[OK] wsnr = wsnr[OK] z = z[OK] snr = snr[OK] freq = freq[OK] DM = DM[OK] + alpha = alpha[OK] + alphaerr = alphaerr[OK] NFRB = len(OK) + tres = np.zeros([NFRB]) for i,name in enumerate(tns): j = np.where(name[0:8] == names)[0] @@ -1445,8 +1578,11 @@ def get_data(): freq = np.array(freq) DM = np.array(DM) tres = np.array(tres) + tauobserr = np.array(tauobserr) + alpha = np.array(alpha) + alphaerr = np.array(alphaerr) - return tns,tauobs,w95,wsnr,z,snr,freq,DM,tres + return tns,tauobs,w95,wsnr,z,snr,freq,DM,tres,tauobserr,alpha,alphaerr @@ -1496,4 +1632,172 @@ def make_cum_dist(vals): ys[-1] = 1 return xs,ys -main() + +# actual best fits +truetauxbest, truetaullbests, truetaupbests, truewxbest, truewllbests, truewpvalst,taullCHIME,wllCHIME = fit_scat_width(bootstrap=False,doplots=True,alpha=-4) + +######## resused bootstrap code for alpha ############# + + +NALPHA=11 +alphas = np.linspace(-4,0,NALPHA) +NPARAMS=3 # max params in any given model +taufitresults = np.zeros([NALPHA,NFUNC,NPARAMS]) +wfitresults = np.zeros([NALPHA,NFUNC,NPARAMS]) +taullresults = np.zeros([NALPHA,NFUNC]) +wllresults = np.zeros([NALPHA,NFUNC]) + + +for i,alpha in enumerate(alphas): + # max likelihood fitting results for all models + tauxbest, taullbests, taupbests, wxbest, wllbests, wpvals,taullCHIME,wllCHIME = fit_scat_width(alpha=alpha) + + print("ALPHA is ",alpha," relative log likelihoods are ",taullCHIME,wllCHIME) + + for j,res in enumerate(tauxbest): + length = len(res) + 
taufitresults[i,j,:length]=res + + for j,res in enumerate(wxbest): + length = len(res) + wfitresults[i,j,:length]=res + + taullresults[i,:] = taullbests + wllresults[i,:] = wllbests + +plt.figure() +plt.xlabel("$\\alpha$") +plt.ylabel("$\\log_{10} {L} - \\log_{10} {L_{\\rm lognormal}}$") +#doit = np.arange(NFUNC) +doit = [1,2,3] +styles = ["-","--","-.",":","-","--","-.",":","-","--","-.",":"] +ax1 = plt.gca() +ax2 = ax1.twinx() + + +for i,j in enumerate(doit): + string="" + ax1.plot(alphas,taullresults[:,j]-taullresults[:,0],label=FNAMES[j],linestyle=styles[i]) + #plt.plot(alphas,wllresults[:,j]-wllresults[:,0],label=FNAMES[j],linestyle=styles[i]) + +ax2.plot(alphas,taufitresults[:,0,0],label="ASKAP $\mu_{\\tau, 1\,GHz}$",color="black",linestyle="-") +ax2.plot(alphas,CHIME_mut + np.log10((1000/600)**alphas),label="CHIME $\\mu_{\\tau, 1\,GHz}$",color="black",linestyle="--") + +#ax2.plot(alphas,taufitresults[:,0,1],label="ASKAP $\sigma_{\\tau, 1\,GHz}$",color="gray",linestyle="-.") +#ax2.plot([alphas[0],alphas[-1]],[CHIME_st,CHIME_st],label="CHIME $\\sigma_{\\tau, 1\,GHz}$",color="gray",linestyle=":") +plt.sca(ax2) +plt.ylabel("Parameter values") +#plt.legend(loc="upper right") +plt.legend() +plt.sca(ax1) + +#plt.legend(loc="lower left") +plt.legend() +plt.tight_layout() +plt.savefig("alpha_variation_results.png") +#plt.savefig("width_alpha_variation_results.png") +plt.close() + + +######### does this for bootstrapping scattering ###### + +NBOOTSTRAP=100 +NPARAMS=3 # max params in any given model +taufitresults = np.zeros([NBOOTSTRAP,NFUNC,NPARAMS]) +wfitresults = np.zeros([NBOOTSTRAP,NFUNC,NPARAMS]) +taullresults = np.zeros([NBOOTSTRAP,NFUNC]) +wllresults = np.zeros([NBOOTSTRAP,NFUNC]) + + +for i in np.arange(NBOOTSTRAP): + # max likelihood fitting results for all models + print("Doing bootstrap ",i) + tauxbest, taullbests, taupbests, wxbest, wllbests, wpvals,taullCHIME,wllCHIME = fit_scat_width(bootstrap=True,bsalpha=False) + + for j,res in enumerate(tauxbest): + 
length = len(res) + taufitresults[i,j,:length]=res + + for j,res in enumerate(wxbest): + length = len(res) + wfitresults[i,j,:length]=res + + taullresults[i,:] = taullbests + wllresults[i,:] = wllbests + +plt.figure() +plt.xlabel("run") +plt.ylabel("param value") +print("Parameters for tau") +for j in np.arange(NFUNC): + string="" + for k,val in enumerate(truetauxbest[j]): + plt.plot(taufitresults[:,j,k],label=FNAMES[j]+" param "+str(k)) + mean = np.sum(taufitresults[:,j,k])/NBOOTSTRAP + rms = (np.sum((taufitresults[:,j,k] - mean)**2)/(NBOOTSTRAP-1))**0.5 + print(j,k,"Orig mean ",val," this mean ",mean," std err ",rms) + print("\n") +plt.legend(fontsize=4) +plt.tight_layout() +plt.savefig("tau_bootstrap_results.png") +plt.close() + + +plt.figure() +plt.xlabel("run") +plt.ylabel("param value") +print("\n\n\n\nParameters for width") +for j in np.arange(NFUNC): + string="" + for k,val in enumerate(truewxbest[j]): + plt.plot(wfitresults[:,j,k],label=FNAMES[j]+" param "+str(k)) + mean = np.sum(wfitresults[:,j,k])/NBOOTSTRAP + rms = (np.sum((wfitresults[:,j,k] - mean)**2)/(NBOOTSTRAP-1))**0.5 + print(j,k,"Orig mean ",val," this mean ",mean," std err ",rms) + print("\n") +plt.legend(fontsize=4) +plt.tight_layout() +plt.savefig("width_bootstrap_results.png") +plt.close() + + +# generates a distribution in difference of log-likelihood values for each model compared to lognormal +#Scattering + +for i in np.arange(NFUNC): + + print("ll for function ",FNAMES[i]) + #removes nans + OK = np.where(np.isfinite(taullresults[:,i]) == True)[0] + NOK = len(OK) + mean = np.mean(taullresults[OK,i]) + rms = (np.sum((taullresults[OK,i]-mean)**2)/(NOK-1))**0.5 + print("For scattering, mean and rms in likelihood is ",mean,rms) + + OK = np.where(np.isfinite(wllresults[:,i]) == True)[0] + NOK = len(OK) + mean = np.mean(wllresults[OK,i]) + rms = (np.sum((wllresults[OK,i]-mean)**2)/(NOK-1))**0.5 + print("For width, mean and rms in likelihood is ",mean,rms) + print("\n") + +print("##### log 
likelihood differences #####") + +for i in np.arange(NFUNC): + if i==0: + continue + print("dll for function ",FNAMES[i]) + dtaullresults = taullresults[:,i] - taullresults[:,0] + OK = np.where(np.isfinite(dtaullresults) == True)[0] + NOK = len(OK) + mean = np.mean(dtaullresults[OK]) + rms = (np.sum((dtaullresults[OK]-mean)**2)/(NOK-1))**0.5 + print("For scattering, mean and rms in likelihood difference is ",mean,rms) + + dwllresults = wllresults[:,i] - wllresults[:,0] + OK = np.where(np.isfinite(dwllresults) == True)[0] + NOK = len(OK) + mean = np.mean(dwllresults[OK]) + rms = (np.sum((dwllresults[OK]-mean)**2)/(NOK-1))**0.5 + print("For width, mean and rms in likelihood difference is ",mean,rms) + print("\n") diff --git a/papers/lsst/Data/craft_ics_hosts.csv b/papers/lsst/Data/craft_ics_hosts.csv new file mode 100644 index 00000000..cde8de12 --- /dev/null +++ b/papers/lsst/Data/craft_ics_hosts.csv @@ -0,0 +1,32 @@ +TNS,z,mr,pmr +20180924B,0.3214,21.32 +20181112A,0.4755,21.49 +20190102C,0.29,20.73 +20190608B,0.1178,17.15 +20190611B,0.378,22.35 +20190711A,0.522,22.93 +20190714A,0.2365,19.47 +20191001A,0.23,17.82 +20191228B,0.2432,21.92 +20200430A,0.161,21.18 +20200906A,0.3688,20.70 +20210117A,0.214,22.95 +20210320C,0.28,19.23 +20210807D,0.1293,17.35 +20211127I,0.046946,15.38 +20211203C,0.3439,20.29 +20211212A,0.0707,16.21 +20220105A,0.2785,21.53 +20220501C,0.381,20.57 +20220610A,1.015,23.99 +20220725A,0.1926,17.83 +20220918A,0.491,23.6 +20221106A,0.2044,18.34 +20230526A,0.157,21.15 +20230708A,0.105,22.73 +20230902A,0.3619,21.52 +20231226A,0.156,19.01 +20240201A,0.042729,16.97 +20240210A,0.023686,15.13 +20240304A,0.2423,21.08 +20240310A,0.127,20.16 diff --git a/papers/lsst/Data/dsa_hosts.csv b/papers/lsst/Data/dsa_hosts.csv new file mode 100644 index 00000000..e3f31bc9 --- /dev/null +++ b/papers/lsst/Data/dsa_hosts.csv @@ -0,0 +1,40 @@ +z,mr,phost +0.0112,13.25,0.99 +0.0368,15.41,0.99 +0.0433,16.15,0.97 +0.0894,16.51,0.97 +0.0939,19.10,0.99 +0.1139,20.20,0.97 
+0.1270,19.94,0.95 +0.1582,19.43,0.98 +0.2395,18.81,0.99 +0.2414,20.74,0.99 +0.2481,20.45,0.98 +0.2505,21.38,0.99 +0.2621,18.53,0.90 +0.2706,19.48,0.97 +0.2764,19.01,0.94 +0.2847,19.64,0.99 +0.300,19.91,0.99 +0.3015,21.86,0.99 +0.3270,19.97,0.99 +0.3510,20.80,0.56 +0.3619,20.95,0.99 +0.3714,23.35,0.63 +0.4012,23.00,0.99 +0.4525,21.36,0.99 +0.4780,21.20,0.97 +0.5310,21.38,0.42 +0.5422,22.84,0.62 +0.5530,22.61,0.99 +0.6214,21.22,0.97 +0.9750,23.17,0.92 +0.1185,19.33,0.99 +0.21,20.36,0.99 +0.262,22.7,0.99 +0.3355,20.49,0.99 +0.287,21.12,0.99 +0.376,21.20,0.98 +0.553,22.90,0.99 +0.968,21.91,0.99 +1.354,21.15,0.95 diff --git a/papers/lsst/Data/meerkat_mr.txt b/papers/lsst/Data/meerkat_mr.txt new file mode 100644 index 00000000..74ddaf7d --- /dev/null +++ b/papers/lsst/Data/meerkat_mr.txt @@ -0,0 +1,25 @@ +# r-band magnitudes of FRB hosts from Table D1 of Pastor-Morales et al +# https://arxiv.org/pdf/2507.05982 +# -ve z means photometric redshift +# 9999 means no data +# loc=0 is incoherent detection, 1 is coherent +# FRB DMX zspec mr loc w +20220222 974 0.853 23.86 1 1 +20220224 1055 0.6271 21.63 0 1 +20230125 459 0.3265 22.12 1 1 +20230306 636 9999 9999 1 1 +20230413 1461 9999 9999 1 1 +20230503 350 -0.32 20.11 0 1 +20230613 427 0.3923 20.132 1 1 +20230808 590 0.3472 22.3 1 0.51 +20230808 590 0.3472 22.9 1 0.49 +20230814 249 9999 22.03 0 1 +20230827 1364 9999 9999 1 1 +20230907 973 0.4638 19.83 0 1 +20231007 2597 9999 9999 0 1 +20231010 370 -0.61 21.24 1 0.52 +20231010 370 -0.84 22.52 1 0.47 +20231020 892 0.4775 21.81 0 1 +20231204 1700 9999 9999 1 1 +20231210 662 -0.5 21.19 1 0.81 +20231210 662 -1.37 23.9 1 0.148 diff --git a/papers/lsst/Photometric/CRACO/Smeared.ecsv b/papers/lsst/Photometric/CRACO/Smeared.ecsv new file mode 100644 index 00000000..59ddb7d7 --- /dev/null +++ b/papers/lsst/Photometric/CRACO/Smeared.ecsv @@ -0,0 +1,121 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: 
string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2, "Z_PHOTO": 0.03}, +# "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", +# "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, +# "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01}}'} +TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW +0 934.0882936484263 00:00:00 00:00:00 0.785778162562317 29.141753808504433 -1.0 -1.0 -1.0 35.0 888 288 +1 616.5738602258931 00:00:00 00:00:00 0.8349135358303849 10.160405042320036 -1.0 -1.0 -1.0 35.0 888 288 +2 946.6052448796315 00:00:00 00:00:00 0.6474993506647525 10.652772181822607 -1.0 -1.0 -1.0 35.0 888 288 +3 586.5727013781997 00:00:00 00:00:00 0.12495187173178796 12.415341138097366 -1.0 -1.0 -1.0 35.0 888 288 +4 2153.3992719321486 00:00:00 00:00:00 0.8036468331892808 12.24289806494758 -1.0 -1.0 -1.0 35.0 888 288 +5 1343.7265012800563 00:00:00 00:00:00 1.727489385549202 11.09240844582027 -1.0 -1.0 -1.0 35.0 888 288 +6 529.8895060996854 00:00:00 00:00:00 0.603211700191891 13.133173041616157 -1.0 -1.0 -1.0 35.0 888 288 +7 1022.7800218616562 00:00:00 00:00:00 1.2888620684968957 10.300851732186109 -1.0 -1.0 -1.0 35.0 888 288 +8 412.87460299855934 00:00:00 00:00:00 0.21865062413724176 12.846680716802911 -1.0 -1.0 -1.0 35.0 888 288 +9 701.6217045820609 00:00:00 00:00:00 0.7299571477022802 10.341966201772214 -1.0 -1.0 -1.0 35.0 888 288 +10 205.82201397136845 00:00:00 00:00:00 0.06074866029287343 20.941567140489564 -1.0 -1.0 -1.0 35.0 888 288 +11 1120.7016388974587 00:00:00 00:00:00 1.1724572933789503 11.41994898605589 -1.0 -1.0 -1.0 35.0 888 288 +12 275.5937204880448 00:00:00 
00:00:00 0.09245951767772358 91.38695892704916 -1.0 -1.0 -1.0 35.0 888 288 +13 436.349204138444 00:00:00 00:00:00 0.44564144183478205 15.275450861578175 -1.0 -1.0 -1.0 35.0 888 288 +14 295.6406494853049 00:00:00 00:00:00 0.1442212477744424 11.219409175951904 -1.0 -1.0 -1.0 35.0 888 288 +15 291.6471440894715 00:00:00 00:00:00 0.2990568845451625 11.18814447805394 -1.0 -1.0 -1.0 35.0 888 288 +16 1618.8558318442904 00:00:00 00:00:00 0.9448819250948836 24.749670081652773 -1.0 -1.0 -1.0 35.0 888 288 +17 983.9563468218174 00:00:00 00:00:00 0.5110387741801308 12.591584618769005 -1.0 -1.0 -1.0 35.0 888 288 +18 676.3365646959189 00:00:00 00:00:00 0.8336236470816221 10.476494180170306 -1.0 -1.0 -1.0 35.0 888 288 +19 645.2399497353754 00:00:00 00:00:00 0.49888042615577133 10.286082085018421 -1.0 -1.0 -1.0 35.0 888 288 +20 539.5953081699104 00:00:00 00:00:00 0.6182169860530876 35.45318302068964 -1.0 -1.0 -1.0 35.0 888 288 +21 443.6632526190763 00:00:00 00:00:00 0.596702703542215 14.319328661152863 -1.0 -1.0 -1.0 35.0 888 288 +22 81.45567951589115 00:00:00 00:00:00 0.06896887062626902 15.566080612070119 -1.0 -1.0 -1.0 35.0 888 288 +23 374.91893100970947 00:00:00 00:00:00 0.08654025153807617 29.766613679561118 -1.0 -1.0 -1.0 35.0 888 288 +24 1001.3592175483924 00:00:00 00:00:00 1.1219928711003506 12.965473066837115 -1.0 -1.0 -1.0 35.0 888 288 +25 711.626862396593 00:00:00 00:00:00 0.14070511599819086 13.229440860953233 -1.0 -1.0 -1.0 35.0 888 288 +26 356.4667997984884 00:00:00 00:00:00 0.44644296786158993 20.3929051321851 -1.0 -1.0 -1.0 35.0 888 288 +27 405.6849410353128 00:00:00 00:00:00 0.43892086227384686 12.020783061083032 -1.0 -1.0 -1.0 35.0 888 288 +28 438.56039965043755 00:00:00 00:00:00 0.41899323244205144 14.962222736067005 -1.0 -1.0 -1.0 35.0 888 288 +29 1032.0031340800927 00:00:00 00:00:00 1.0111782328413856 12.249965125983609 -1.0 -1.0 -1.0 35.0 888 288 +30 217.45075943955328 00:00:00 00:00:00 0.18461768474454746 13.329278119983234 -1.0 -1.0 -1.0 35.0 888 288 +31 
337.87773894962976 00:00:00 00:00:00 0.3539648344531408 15.842464362846469 -1.0 -1.0 -1.0 35.0 888 288 +32 747.9605061576287 00:00:00 00:00:00 0.40398282515571504 11.35266012864496 -1.0 -1.0 -1.0 35.0 888 288 +33 721.5025019044709 00:00:00 00:00:00 0.9018979316139177 20.893413876660702 -1.0 -1.0 -1.0 35.0 888 288 +34 1087.4264164816361 00:00:00 00:00:00 1.035096011417865 12.559979217082482 -1.0 -1.0 -1.0 35.0 888 288 +35 163.23187688161033 00:00:00 00:00:00 0.1333990348830121 78.61411210143966 -1.0 -1.0 -1.0 35.0 888 288 +36 515.446178879044 00:00:00 00:00:00 0.7752482941845488 11.417411530656945 -1.0 -1.0 -1.0 35.0 888 288 +37 392.6137583987218 00:00:00 00:00:00 0.31949791108182546 16.89212239318249 -1.0 -1.0 -1.0 35.0 888 288 +38 719.6387498724689 00:00:00 00:00:00 0.9168854467146328 22.483314792466093 -1.0 -1.0 -1.0 35.0 888 288 +39 1095.9625583277343 00:00:00 00:00:00 1.3932609853721545 13.438743382703526 -1.0 -1.0 -1.0 35.0 888 288 +40 758.3297416501492 00:00:00 00:00:00 0.6401398012876773 12.85406115188814 -1.0 -1.0 -1.0 35.0 888 288 +41 196.42239794857238 00:00:00 00:00:00 0.15281141706879714 36.32216074676754 -1.0 -1.0 -1.0 35.0 888 288 +42 300.9638203550653 00:00:00 00:00:00 0.38656404639181324 13.974445331317181 -1.0 -1.0 -1.0 35.0 888 288 +43 819.1568769824953 00:00:00 00:00:00 0.8538690229315276 10.172610101290998 -1.0 -1.0 -1.0 35.0 888 288 +44 672.8493862791563 00:00:00 00:00:00 0.6796843371842363 13.890953706380596 -1.0 -1.0 -1.0 35.0 888 288 +45 171.9322515506212 00:00:00 00:00:00 0.151862894364487 34.7413812643017 -1.0 -1.0 -1.0 35.0 888 288 +46 1425.9053080008052 00:00:00 00:00:00 1.7591365750119778 12.7456387947933 -1.0 -1.0 -1.0 35.0 888 288 +47 489.25638240698544 00:00:00 00:00:00 0.5271052638239554 16.173416507939116 -1.0 -1.0 -1.0 35.0 888 288 +48 303.6841850164093 00:00:00 00:00:00 0.25795471657648067 10.375992085957462 -1.0 -1.0 -1.0 35.0 888 288 +49 875.3662532129479 00:00:00 00:00:00 0.6415497100158352 10.214362638300399 -1.0 -1.0 -1.0 
35.0 888 288 +50 716.6818881006753 00:00:00 00:00:00 0.6864110893626921 14.452157966062835 -1.0 -1.0 -1.0 35.0 888 288 +51 1780.2827105839892 00:00:00 00:00:00 1.3436702111751808 10.3884232109404 -1.0 -1.0 -1.0 35.0 888 288 +52 996.2724896444047 00:00:00 00:00:00 0.5216743407822534 13.213225298728211 -1.0 -1.0 -1.0 35.0 888 288 +53 1698.7961537425167 00:00:00 00:00:00 1.5613492775221662 12.607503410081835 -1.0 -1.0 -1.0 35.0 888 288 +54 1126.5413295499866 00:00:00 00:00:00 1.0336520109034335 17.051568734514 -1.0 -1.0 -1.0 35.0 888 288 +55 450.88973429474333 00:00:00 00:00:00 0.36252077841437375 10.69280211941846 -1.0 -1.0 -1.0 35.0 888 288 +56 973.3753477739596 00:00:00 00:00:00 1.1828217991774477 17.506572347460136 -1.0 -1.0 -1.0 35.0 888 288 +57 1489.8174891533963 00:00:00 00:00:00 0.46010099973404217 52.449273584894264 -1.0 -1.0 -1.0 35.0 888 288 +58 884.0458853234747 00:00:00 00:00:00 0.94457312363897 15.638104651466023 -1.0 -1.0 -1.0 35.0 888 288 +59 1371.7618432631962 00:00:00 00:00:00 1.4252404386072601 11.871898609476887 -1.0 -1.0 -1.0 35.0 888 288 +60 779.9123200563506 00:00:00 00:00:00 0.7695273462562608 11.377991388220451 -1.0 -1.0 -1.0 35.0 888 288 +61 244.32835931715942 00:00:00 00:00:00 0.1512188351440696 22.08068896327773 -1.0 -1.0 -1.0 35.0 888 288 +62 170.42643162915402 00:00:00 00:00:00 0.15963831683903665 11.891868553016314 -1.0 -1.0 -1.0 35.0 888 288 +63 697.6397584685641 00:00:00 00:00:00 0.6155151419755035 10.141332615940971 -1.0 -1.0 -1.0 35.0 888 288 +64 896.3411776022986 00:00:00 00:00:00 0.83352003928136 13.956903508254495 -1.0 -1.0 -1.0 35.0 888 288 +65 673.4550793235062 00:00:00 00:00:00 0.5008895818994006 34.96309565327556 -1.0 -1.0 -1.0 35.0 888 288 +66 409.3504561455062 00:00:00 00:00:00 0.38651409263865677 17.073302923618904 -1.0 -1.0 -1.0 35.0 888 288 +67 1279.5121634320094 00:00:00 00:00:00 1.766754211557735 12.06090823054981 -1.0 -1.0 -1.0 35.0 888 288 +68 455.1932713332237 00:00:00 00:00:00 0.7629777494045573 13.581352455460024 
-1.0 -1.0 -1.0 35.0 888 288 +69 212.5080624683044 00:00:00 00:00:00 0.24562999100714805 72.07735525486979 -1.0 -1.0 -1.0 35.0 888 288 +70 208.83082789773542 00:00:00 00:00:00 0.09345708729049836 35.86335125977792 -1.0 -1.0 -1.0 35.0 888 288 +71 218.52612475416436 00:00:00 00:00:00 0.2169993110272826 30.226196813374152 -1.0 -1.0 -1.0 35.0 888 288 +72 1591.9222149862642 00:00:00 00:00:00 1.8072297925518148 12.417298253806685 -1.0 -1.0 -1.0 35.0 888 288 +73 218.79115225062986 00:00:00 00:00:00 0.031212774818377767 14.217908248312149 -1.0 -1.0 -1.0 35.0 888 288 +74 618.4593505087528 00:00:00 00:00:00 0.7560975114810532 11.664308801315117 -1.0 -1.0 -1.0 35.0 888 288 +75 652.0130715223407 00:00:00 00:00:00 0.7242255489992683 12.468451895755134 -1.0 -1.0 -1.0 35.0 888 288 +76 673.0857403351645 00:00:00 00:00:00 0.9253391770587509 14.050855469722556 -1.0 -1.0 -1.0 35.0 888 288 +77 785.3518302003876 00:00:00 00:00:00 1.1363130575887341 11.213356812360049 -1.0 -1.0 -1.0 35.0 888 288 +78 921.812608616067 00:00:00 00:00:00 0.5308587220894548 10.282098860310228 -1.0 -1.0 -1.0 35.0 888 288 +79 656.9337821238685 00:00:00 00:00:00 0.7718799884749266 11.233631650878838 -1.0 -1.0 -1.0 35.0 888 288 +80 1138.704218112882 00:00:00 00:00:00 1.4103564032980764 26.539797617550633 -1.0 -1.0 -1.0 35.0 888 288 +81 620.5653235040173 00:00:00 00:00:00 0.3608403500538981 21.45481401537115 -1.0 -1.0 -1.0 35.0 888 288 +82 838.2486260752073 00:00:00 00:00:00 0.9692149421266859 10.446614306626344 -1.0 -1.0 -1.0 35.0 888 288 +83 552.5443734504142 00:00:00 00:00:00 0.48084252190706855 21.543404977178355 -1.0 -1.0 -1.0 35.0 888 288 +84 573.8507253223063 00:00:00 00:00:00 0.48740732199390757 11.084164653890928 -1.0 -1.0 -1.0 35.0 888 288 +85 420.6998183657156 00:00:00 00:00:00 0.44320207044782256 17.32850048985882 -1.0 -1.0 -1.0 35.0 888 288 +86 439.5439082870578 00:00:00 00:00:00 0.2783782253902463 35.833665311539534 -1.0 -1.0 -1.0 35.0 888 288 +87 1130.0482166914742 00:00:00 00:00:00 
1.2831967999076628 12.35679852892303 -1.0 -1.0 -1.0 35.0 888 288 +88 450.20877052020955 00:00:00 00:00:00 0.19926746916562046 12.665759219918368 -1.0 -1.0 -1.0 35.0 888 288 +89 110.1657516020332 00:00:00 00:00:00 0.1195172001672582 18.200353247030144 -1.0 -1.0 -1.0 35.0 888 288 +90 443.8604241510417 00:00:00 00:00:00 0.33340333195524796 13.242335799793306 -1.0 -1.0 -1.0 35.0 888 288 +91 444.686556791516 00:00:00 00:00:00 0.4191471059154911 13.80369034602879 -1.0 -1.0 -1.0 35.0 888 288 +92 562.5781184397683 00:00:00 00:00:00 0.680176372049107 10.27995831481763 -1.0 -1.0 -1.0 35.0 888 288 +93 167.62653574722364 00:00:00 00:00:00 0.0389571382922253 17.570916297751136 -1.0 -1.0 -1.0 35.0 888 288 +94 481.62428400668705 00:00:00 00:00:00 0.48270894964902716 11.826070143713975 -1.0 -1.0 -1.0 35.0 888 288 +95 475.5794761937849 00:00:00 00:00:00 0.5935539854869426 12.987091653999931 -1.0 -1.0 -1.0 35.0 888 288 +96 295.3711932454349 00:00:00 00:00:00 0.2618277155954746 11.871291318754192 -1.0 -1.0 -1.0 35.0 888 288 +97 1364.3326396162597 00:00:00 00:00:00 1.5749185090159343 10.696711455581303 -1.0 -1.0 -1.0 35.0 888 288 +98 584.9391974583868 00:00:00 00:00:00 0.6519095988358196 10.372529937559637 -1.0 -1.0 -1.0 35.0 888 288 +99 1076.886030040255 00:00:00 00:00:00 1.023384185705521 14.901426606089192 -1.0 -1.0 -1.0 35.0 888 288 diff --git a/papers/lsst/Photometric/CRACO/Smeared_and_zFrac.ecsv b/papers/lsst/Photometric/CRACO/Smeared_and_zFrac.ecsv new file mode 100644 index 00000000..5d0c95a4 --- /dev/null +++ b/papers/lsst/Photometric/CRACO/Smeared_and_zFrac.ecsv @@ -0,0 +1,95 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: 
float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2, "Z_FRACTION": 24.7, "Z_PHOTO": 0.03}, +# "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", +# "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, +# "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01}}'} +TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW +1 616.5738602258931 00:00:00 00:00:00 0.8349135358303849 10.160405042320036 -1.0 -1.0 -1.0 35.0 888 288 +2 946.6052448796315 00:00:00 00:00:00 0.6474993506647525 10.652772181822607 -1.0 -1.0 -1.0 35.0 888 288 +3 586.5727013781997 00:00:00 00:00:00 0.12495187173178796 12.415341138097366 -1.0 -1.0 -1.0 35.0 888 288 +4 2153.3992719321486 00:00:00 00:00:00 0.8036468331892808 12.24289806494758 -1.0 -1.0 -1.0 35.0 888 288 +6 529.8895060996854 00:00:00 00:00:00 0.603211700191891 13.133173041616157 -1.0 -1.0 -1.0 35.0 888 288 +8 412.87460299855934 00:00:00 00:00:00 0.21865062413724176 12.846680716802911 -1.0 -1.0 -1.0 35.0 888 288 +9 701.6217045820609 00:00:00 00:00:00 0.7299571477022802 10.341966201772214 -1.0 -1.0 -1.0 35.0 888 288 +10 205.82201397136845 00:00:00 00:00:00 0.06074866029287343 20.941567140489564 -1.0 -1.0 -1.0 35.0 888 288 +12 275.5937204880448 00:00:00 00:00:00 0.09245951767772358 91.38695892704916 -1.0 -1.0 -1.0 35.0 888 288 +13 436.349204138444 00:00:00 00:00:00 0.44564144183478205 15.275450861578175 -1.0 -1.0 -1.0 35.0 888 288 +14 295.6406494853049 00:00:00 00:00:00 0.1442212477744424 11.219409175951904 -1.0 -1.0 -1.0 35.0 888 288 +15 291.6471440894715 00:00:00 00:00:00 0.2990568845451625 11.18814447805394 -1.0 -1.0 -1.0 35.0 888 288 +16 1618.8558318442904 00:00:00 00:00:00 0.9448819250948836 24.749670081652773 -1.0 -1.0 -1.0 35.0 888 288 +17 983.9563468218174 00:00:00 00:00:00 0.5110387741801308 12.591584618769005 -1.0 -1.0 -1.0 35.0 888 288 +19 645.2399497353754 00:00:00 00:00:00 0.49888042615577133 
10.286082085018421 -1.0 -1.0 -1.0 35.0 888 288 +20 539.5953081699104 00:00:00 00:00:00 0.6182169860530876 35.45318302068964 -1.0 -1.0 -1.0 35.0 888 288 +21 443.6632526190763 00:00:00 00:00:00 0.596702703542215 14.319328661152863 -1.0 -1.0 -1.0 35.0 888 288 +22 81.45567951589115 00:00:00 00:00:00 0.06896887062626902 15.566080612070119 -1.0 -1.0 -1.0 35.0 888 288 +23 374.91893100970947 00:00:00 00:00:00 0.08654025153807617 29.766613679561118 -1.0 -1.0 -1.0 35.0 888 288 +25 711.626862396593 00:00:00 00:00:00 0.14070511599819086 13.229440860953233 -1.0 -1.0 -1.0 35.0 888 288 +26 356.4667997984884 00:00:00 00:00:00 0.44644296786158993 20.3929051321851 -1.0 -1.0 -1.0 35.0 888 288 +27 405.6849410353128 00:00:00 00:00:00 0.43892086227384686 12.020783061083032 -1.0 -1.0 -1.0 35.0 888 288 +28 438.56039965043755 00:00:00 00:00:00 0.41899323244205144 14.962222736067005 -1.0 -1.0 -1.0 35.0 888 288 +29 1032.0031340800927 00:00:00 00:00:00 1.0111782328413856 12.249965125983609 -1.0 -1.0 -1.0 35.0 888 288 +30 217.45075943955328 00:00:00 00:00:00 0.18461768474454746 13.329278119983234 -1.0 -1.0 -1.0 35.0 888 288 +31 337.87773894962976 00:00:00 00:00:00 0.3539648344531408 15.842464362846469 -1.0 -1.0 -1.0 35.0 888 288 +32 747.9605061576287 00:00:00 00:00:00 0.40398282515571504 11.35266012864496 -1.0 -1.0 -1.0 35.0 888 288 +35 163.23187688161033 00:00:00 00:00:00 0.1333990348830121 78.61411210143966 -1.0 -1.0 -1.0 35.0 888 288 +37 392.6137583987218 00:00:00 00:00:00 0.31949791108182546 16.89212239318249 -1.0 -1.0 -1.0 35.0 888 288 +40 758.3297416501492 00:00:00 00:00:00 0.6401398012876773 12.85406115188814 -1.0 -1.0 -1.0 35.0 888 288 +41 196.42239794857238 00:00:00 00:00:00 0.15281141706879714 36.32216074676754 -1.0 -1.0 -1.0 35.0 888 288 +42 300.9638203550653 00:00:00 00:00:00 0.38656404639181324 13.974445331317181 -1.0 -1.0 -1.0 35.0 888 288 +43 819.1568769824953 00:00:00 00:00:00 0.8538690229315276 10.172610101290998 -1.0 -1.0 -1.0 35.0 888 288 +45 171.9322515506212 00:00:00 
00:00:00 0.151862894364487 34.7413812643017 -1.0 -1.0 -1.0 35.0 888 288 +47 489.25638240698544 00:00:00 00:00:00 0.5271052638239554 16.173416507939116 -1.0 -1.0 -1.0 35.0 888 288 +48 303.6841850164093 00:00:00 00:00:00 0.25795471657648067 10.375992085957462 -1.0 -1.0 -1.0 35.0 888 288 +49 875.3662532129479 00:00:00 00:00:00 0.6415497100158352 10.214362638300399 -1.0 -1.0 -1.0 35.0 888 288 +50 716.6818881006753 00:00:00 00:00:00 0.6864110893626921 14.452157966062835 -1.0 -1.0 -1.0 35.0 888 288 +51 1780.2827105839892 00:00:00 00:00:00 1.3436702111751808 10.3884232109404 -1.0 -1.0 -1.0 35.0 888 288 +52 996.2724896444047 00:00:00 00:00:00 0.5216743407822534 13.213225298728211 -1.0 -1.0 -1.0 35.0 888 288 +54 1126.5413295499866 00:00:00 00:00:00 1.0336520109034335 17.051568734514 -1.0 -1.0 -1.0 35.0 888 288 +55 450.88973429474333 00:00:00 00:00:00 0.36252077841437375 10.69280211941846 -1.0 -1.0 -1.0 35.0 888 288 +57 1489.8174891533963 00:00:00 00:00:00 0.46010099973404217 52.449273584894264 -1.0 -1.0 -1.0 35.0 888 288 +61 244.32835931715942 00:00:00 00:00:00 0.1512188351440696 22.08068896327773 -1.0 -1.0 -1.0 35.0 888 288 +62 170.42643162915402 00:00:00 00:00:00 0.15963831683903665 11.891868553016314 -1.0 -1.0 -1.0 35.0 888 288 +64 896.3411776022986 00:00:00 00:00:00 0.83352003928136 13.956903508254495 -1.0 -1.0 -1.0 35.0 888 288 +65 673.4550793235062 00:00:00 00:00:00 0.5008895818994006 34.96309565327556 -1.0 -1.0 -1.0 35.0 888 288 +66 409.3504561455062 00:00:00 00:00:00 0.38651409263865677 17.073302923618904 -1.0 -1.0 -1.0 35.0 888 288 +68 455.1932713332237 00:00:00 00:00:00 0.7629777494045573 13.581352455460024 -1.0 -1.0 -1.0 35.0 888 288 +69 212.5080624683044 00:00:00 00:00:00 0.24562999100714805 72.07735525486979 -1.0 -1.0 -1.0 35.0 888 288 +70 208.83082789773542 00:00:00 00:00:00 0.09345708729049836 35.86335125977792 -1.0 -1.0 -1.0 35.0 888 288 +71 218.52612475416436 00:00:00 00:00:00 0.2169993110272826 30.226196813374152 -1.0 -1.0 -1.0 35.0 888 288 +73 
218.79115225062986 00:00:00 00:00:00 0.031212774818377767 14.217908248312149 -1.0 -1.0 -1.0 35.0 888 288 +74 618.4593505087528 00:00:00 00:00:00 0.7560975114810532 11.664308801315117 -1.0 -1.0 -1.0 35.0 888 288 +75 652.0130715223407 00:00:00 00:00:00 0.7242255489992683 12.468451895755134 -1.0 -1.0 -1.0 35.0 888 288 +76 673.0857403351645 00:00:00 00:00:00 0.9253391770587509 14.050855469722556 -1.0 -1.0 -1.0 35.0 888 288 +79 656.9337821238685 00:00:00 00:00:00 0.7718799884749266 11.233631650878838 -1.0 -1.0 -1.0 35.0 888 288 +80 1138.704218112882 00:00:00 00:00:00 1.4103564032980764 26.539797617550633 -1.0 -1.0 -1.0 35.0 888 288 +81 620.5653235040173 00:00:00 00:00:00 0.3608403500538981 21.45481401537115 -1.0 -1.0 -1.0 35.0 888 288 +83 552.5443734504142 00:00:00 00:00:00 0.48084252190706855 21.543404977178355 -1.0 -1.0 -1.0 35.0 888 288 +84 573.8507253223063 00:00:00 00:00:00 0.48740732199390757 11.084164653890928 -1.0 -1.0 -1.0 35.0 888 288 +85 420.6998183657156 00:00:00 00:00:00 0.44320207044782256 17.32850048985882 -1.0 -1.0 -1.0 35.0 888 288 +86 439.5439082870578 00:00:00 00:00:00 0.2783782253902463 35.833665311539534 -1.0 -1.0 -1.0 35.0 888 288 +88 450.20877052020955 00:00:00 00:00:00 0.19926746916562046 12.665759219918368 -1.0 -1.0 -1.0 35.0 888 288 +89 110.1657516020332 00:00:00 00:00:00 0.1195172001672582 18.200353247030144 -1.0 -1.0 -1.0 35.0 888 288 +90 443.8604241510417 00:00:00 00:00:00 0.33340333195524796 13.242335799793306 -1.0 -1.0 -1.0 35.0 888 288 +91 444.686556791516 00:00:00 00:00:00 0.4191471059154911 13.80369034602879 -1.0 -1.0 -1.0 35.0 888 288 +92 562.5781184397683 00:00:00 00:00:00 0.680176372049107 10.27995831481763 -1.0 -1.0 -1.0 35.0 888 288 +93 167.62653574722364 00:00:00 00:00:00 0.0389571382922253 17.570916297751136 -1.0 -1.0 -1.0 35.0 888 288 +94 481.62428400668705 00:00:00 00:00:00 0.48270894964902716 11.826070143713975 -1.0 -1.0 -1.0 35.0 888 288 +95 475.5794761937849 00:00:00 00:00:00 0.5935539854869426 12.987091653999931 -1.0 -1.0 
-1.0 35.0 888 288 +96 295.3711932454349 00:00:00 00:00:00 0.2618277155954746 11.871291318754192 -1.0 -1.0 -1.0 35.0 888 288 +98 584.9391974583868 00:00:00 00:00:00 0.6519095988358196 10.372529937559637 -1.0 -1.0 -1.0 35.0 888 288 +99 1076.886030040255 00:00:00 00:00:00 1.023384185705521 14.901426606089192 -1.0 -1.0 -1.0 35.0 888 288 diff --git a/papers/lsst/Photometric/CRACO/Spectroscopic.ecsv b/papers/lsst/Photometric/CRACO/Spectroscopic.ecsv new file mode 100644 index 00000000..92589bc5 --- /dev/null +++ b/papers/lsst/Photometric/CRACO/Spectroscopic.ecsv @@ -0,0 +1,121 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2}, +# "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", +# "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, +# "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01}}'} +TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW +0 934.0882936484263 00:00:00 00:00:00 0.7921673000689103 29.141753808504433 -1.0 -1.0 -1.0 35.0 888 288 +1 616.5738602258931 00:00:00 00:00:00 0.8905487730463858 10.160405042320036 -1.0 -1.0 -1.0 35.0 888 288 +2 946.6052448796315 00:00:00 00:00:00 0.6740538677349737 10.652772181822607 -1.0 -1.0 -1.0 35.0 888 288 +3 586.5727013781997 00:00:00 00:00:00 0.076308758479808 12.415341138097366 -1.0 -1.0 -1.0 35.0 888 288 +4 2153.3992719321486 00:00:00 00:00:00 0.7719105683125631 12.24289806494758 -1.0 -1.0 -1.0 35.0 888 288 +5 1343.7265012800563 00:00:00 00:00:00 1.6762564040612347 11.09240844582027 -1.0 -1.0 
-1.0 35.0 888 288 +6 529.8895060996854 00:00:00 00:00:00 0.6148674711657728 13.133173041616157 -1.0 -1.0 -1.0 35.0 888 288 +7 1022.7800218616562 00:00:00 00:00:00 1.2954271609214607 10.300851732186109 -1.0 -1.0 -1.0 35.0 888 288 +8 412.87460299855934 00:00:00 00:00:00 0.20437591335992997 12.846680716802911 -1.0 -1.0 -1.0 35.0 888 288 +9 701.6217045820609 00:00:00 00:00:00 0.7971964864511405 10.341966201772214 -1.0 -1.0 -1.0 35.0 888 288 +10 205.82201397136845 00:00:00 00:00:00 0.035084600186595774 20.941567140489564 -1.0 -1.0 -1.0 35.0 888 288 +11 1120.7016388974587 00:00:00 00:00:00 1.1190141360593933 11.41994898605589 -1.0 -1.0 -1.0 35.0 888 288 +12 275.5937204880448 00:00:00 00:00:00 0.18195765690908733 91.38695892704916 -1.0 -1.0 -1.0 35.0 888 288 +13 436.349204138444 00:00:00 00:00:00 0.43889114213816116 15.275450861578175 -1.0 -1.0 -1.0 35.0 888 288 +14 295.6406494853049 00:00:00 00:00:00 0.20716845953629162 11.219409175951904 -1.0 -1.0 -1.0 35.0 888 288 +15 291.6471440894715 00:00:00 00:00:00 0.25353674073624666 11.18814447805394 -1.0 -1.0 -1.0 35.0 888 288 +16 1618.8558318442904 00:00:00 00:00:00 0.8938125640216341 24.749670081652773 -1.0 -1.0 -1.0 35.0 888 288 +17 983.9563468218174 00:00:00 00:00:00 0.5198177045244614 12.591584618769005 -1.0 -1.0 -1.0 35.0 888 288 +18 676.3365646959189 00:00:00 00:00:00 0.9106560144996961 10.476494180170306 -1.0 -1.0 -1.0 35.0 888 288 +19 645.2399497353754 00:00:00 00:00:00 0.5654604349809239 10.286082085018421 -1.0 -1.0 -1.0 35.0 888 288 +20 539.5953081699104 00:00:00 00:00:00 0.5580214108898183 35.45318302068964 -1.0 -1.0 -1.0 35.0 888 288 +21 443.6632526190763 00:00:00 00:00:00 0.5313222499329159 14.319328661152863 -1.0 -1.0 -1.0 35.0 888 288 +22 81.45567951589115 00:00:00 00:00:00 0.03213122638515013 15.566080612070119 -1.0 -1.0 -1.0 35.0 888 288 +23 374.91893100970947 00:00:00 00:00:00 0.1090230764363179 29.766613679561118 -1.0 -1.0 -1.0 35.0 888 288 +24 1001.3592175483924 00:00:00 00:00:00 1.1373611970579036 
12.965473066837115 -1.0 -1.0 -1.0 35.0 888 288 +25 711.626862396593 00:00:00 00:00:00 0.14735854842537668 13.229440860953233 -1.0 -1.0 -1.0 35.0 888 288 +26 356.4667997984884 00:00:00 00:00:00 0.4130361237696566 20.3929051321851 -1.0 -1.0 -1.0 35.0 888 288 +27 405.6849410353128 00:00:00 00:00:00 0.4022419934538231 12.020783061083032 -1.0 -1.0 -1.0 35.0 888 288 +28 438.56039965043755 00:00:00 00:00:00 0.3146015685795931 14.962222736067005 -1.0 -1.0 -1.0 35.0 888 288 +29 1032.0031340800927 00:00:00 00:00:00 1.029340230030751 12.249965125983609 -1.0 -1.0 -1.0 35.0 888 288 +30 217.45075943955328 00:00:00 00:00:00 0.19288729564479787 13.329278119983234 -1.0 -1.0 -1.0 35.0 888 288 +31 337.87773894962976 00:00:00 00:00:00 0.3614030695297573 15.842464362846469 -1.0 -1.0 -1.0 35.0 888 288 +32 747.9605061576287 00:00:00 00:00:00 0.39117198763502004 11.35266012864496 -1.0 -1.0 -1.0 35.0 888 288 +33 721.5025019044709 00:00:00 00:00:00 0.878613557350197 20.893413876660702 -1.0 -1.0 -1.0 35.0 888 288 +34 1087.4264164816361 00:00:00 00:00:00 1.0208693279100218 12.559979217082482 -1.0 -1.0 -1.0 35.0 888 288 +35 163.23187688161033 00:00:00 00:00:00 0.09607781547873054 78.61411210143966 -1.0 -1.0 -1.0 35.0 888 288 +36 515.446178879044 00:00:00 00:00:00 0.7537064890743636 11.417411530656945 -1.0 -1.0 -1.0 35.0 888 288 +37 392.6137583987218 00:00:00 00:00:00 0.3061610972555812 16.89212239318249 -1.0 -1.0 -1.0 35.0 888 288 +38 719.6387498724689 00:00:00 00:00:00 0.9391830061900761 22.483314792466093 -1.0 -1.0 -1.0 35.0 888 288 +39 1095.9625583277343 00:00:00 00:00:00 1.403367865995526 13.438743382703526 -1.0 -1.0 -1.0 35.0 888 288 +40 758.3297416501492 00:00:00 00:00:00 0.6073490122867862 12.85406115188814 -1.0 -1.0 -1.0 35.0 888 288 +41 196.42239794857238 00:00:00 00:00:00 0.14294900372874897 36.32216074676754 -1.0 -1.0 -1.0 35.0 888 288 +42 300.9638203550653 00:00:00 00:00:00 0.39351448198009875 13.974445331317181 -1.0 -1.0 -1.0 35.0 888 288 +43 819.1568769824953 00:00:00 00:00:00 
0.8261842130673787 10.172610101290998 -1.0 -1.0 -1.0 35.0 888 288 +44 672.8493862791563 00:00:00 00:00:00 0.6972176074469371 13.890953706380596 -1.0 -1.0 -1.0 35.0 888 288 +45 171.9322515506212 00:00:00 00:00:00 0.13935267661456355 34.7413812643017 -1.0 -1.0 -1.0 35.0 888 288 +46 1425.9053080008052 00:00:00 00:00:00 1.7059914290120468 12.7456387947933 -1.0 -1.0 -1.0 35.0 888 288 +47 489.25638240698544 00:00:00 00:00:00 0.5445495899312375 16.173416507939116 -1.0 -1.0 -1.0 35.0 888 288 +48 303.6841850164093 00:00:00 00:00:00 0.2974040403504351 10.375992085957462 -1.0 -1.0 -1.0 35.0 888 288 +49 875.3662532129479 00:00:00 00:00:00 0.6489281792071373 10.214362638300399 -1.0 -1.0 -1.0 35.0 888 288 +50 716.6818881006753 00:00:00 00:00:00 0.6543631275867287 14.452157966062835 -1.0 -1.0 -1.0 35.0 888 288 +51 1780.2827105839892 00:00:00 00:00:00 1.3064991817591554 10.3884232109404 -1.0 -1.0 -1.0 35.0 888 288 +52 996.2724896444047 00:00:00 00:00:00 0.5101162519209581 13.213225298728211 -1.0 -1.0 -1.0 35.0 888 288 +53 1698.7961537425167 00:00:00 00:00:00 1.6116303277289115 12.607503410081835 -1.0 -1.0 -1.0 35.0 888 288 +54 1126.5413295499866 00:00:00 00:00:00 1.0047522761163254 17.051568734514 -1.0 -1.0 -1.0 35.0 888 288 +55 450.88973429474333 00:00:00 00:00:00 0.3469329170116606 10.69280211941846 -1.0 -1.0 -1.0 35.0 888 288 +56 973.3753477739596 00:00:00 00:00:00 1.145456105255157 17.506572347460136 -1.0 -1.0 -1.0 35.0 888 288 +57 1489.8174891533963 00:00:00 00:00:00 0.462264715753818 52.449273584894264 -1.0 -1.0 -1.0 35.0 888 288 +58 884.0458853234747 00:00:00 00:00:00 0.9162679525902213 15.638104651466023 -1.0 -1.0 -1.0 35.0 888 288 +59 1371.7618432631962 00:00:00 00:00:00 1.4395856562070026 11.871898609476887 -1.0 -1.0 -1.0 35.0 888 288 +60 779.9123200563506 00:00:00 00:00:00 0.7955326805432289 11.377991388220451 -1.0 -1.0 -1.0 35.0 888 288 +61 244.32835931715942 00:00:00 00:00:00 0.18847008443468843 22.08068896327773 -1.0 -1.0 -1.0 35.0 888 288 +62 170.42643162915402 
00:00:00 00:00:00 0.15429769730152024 11.891868553016314 -1.0 -1.0 -1.0 35.0 888 288 +63 697.6397584685641 00:00:00 00:00:00 0.6321468327152712 10.141332615940971 -1.0 -1.0 -1.0 35.0 888 288 +64 896.3411776022986 00:00:00 00:00:00 0.8626798876083768 13.956903508254495 -1.0 -1.0 -1.0 35.0 888 288 +65 673.4550793235062 00:00:00 00:00:00 0.5256410065154973 34.96309565327556 -1.0 -1.0 -1.0 35.0 888 288 +66 409.3504561455062 00:00:00 00:00:00 0.3643567026018635 17.073302923618904 -1.0 -1.0 -1.0 35.0 888 288 +67 1279.5121634320094 00:00:00 00:00:00 1.774116920791602 12.06090823054981 -1.0 -1.0 -1.0 35.0 888 288 +68 455.1932713332237 00:00:00 00:00:00 0.6951890165407291 13.581352455460024 -1.0 -1.0 -1.0 35.0 888 288 +69 212.5080624683044 00:00:00 00:00:00 0.26945046670279904 72.07735525486979 -1.0 -1.0 -1.0 35.0 888 288 +70 208.83082789773542 00:00:00 00:00:00 0.11830405147347496 35.86335125977792 -1.0 -1.0 -1.0 35.0 888 288 +71 218.52612475416436 00:00:00 00:00:00 0.22535984531358233 30.226196813374152 -1.0 -1.0 -1.0 35.0 888 288 +72 1591.9222149862642 00:00:00 00:00:00 1.8110148288203143 12.417298253806685 -1.0 -1.0 -1.0 35.0 888 288 +73 218.79115225062986 00:00:00 00:00:00 0.07542156240114564 14.217908248312149 -1.0 -1.0 -1.0 35.0 888 288 +74 618.4593505087528 00:00:00 00:00:00 0.7604740031756679 11.664308801315117 -1.0 -1.0 -1.0 35.0 888 288 +75 652.0130715223407 00:00:00 00:00:00 0.7035573057923941 12.468451895755134 -1.0 -1.0 -1.0 35.0 888 288 +76 673.0857403351645 00:00:00 00:00:00 0.8590470713360423 14.050855469722556 -1.0 -1.0 -1.0 35.0 888 288 +77 785.3518302003876 00:00:00 00:00:00 1.1165757481045127 11.213356812360049 -1.0 -1.0 -1.0 35.0 888 288 +78 921.812608616067 00:00:00 00:00:00 0.5039827338921795 10.282098860310228 -1.0 -1.0 -1.0 35.0 888 288 +79 656.9337821238685 00:00:00 00:00:00 0.8078859860053357 11.233631650878838 -1.0 -1.0 -1.0 35.0 888 288 +80 1138.704218112882 00:00:00 00:00:00 1.4053123916613104 26.539797617550633 -1.0 -1.0 -1.0 35.0 888 288 +81 
620.5653235040173 00:00:00 00:00:00 0.4217561559786075 21.45481401537115 -1.0 -1.0 -1.0 35.0 888 288 +82 838.2486260752073 00:00:00 00:00:00 0.9851844276179345 10.446614306626344 -1.0 -1.0 -1.0 35.0 888 288 +83 552.5443734504142 00:00:00 00:00:00 0.505423229336821 21.543404977178355 -1.0 -1.0 -1.0 35.0 888 288 +84 573.8507253223063 00:00:00 00:00:00 0.4967380348641639 11.084164653890928 -1.0 -1.0 -1.0 35.0 888 288 +85 420.6998183657156 00:00:00 00:00:00 0.45509970537168065 17.32850048985882 -1.0 -1.0 -1.0 35.0 888 288 +86 439.5439082870578 00:00:00 00:00:00 0.24326293577857605 35.833665311539534 -1.0 -1.0 -1.0 35.0 888 288 +87 1130.0482166914742 00:00:00 00:00:00 1.3476543758694524 12.35679852892303 -1.0 -1.0 -1.0 35.0 888 288 +88 450.20877052020955 00:00:00 00:00:00 0.2061046297575687 12.665759219918368 -1.0 -1.0 -1.0 35.0 888 288 +89 110.1657516020332 00:00:00 00:00:00 0.025901855591487423 18.200353247030144 -1.0 -1.0 -1.0 35.0 888 288 +90 443.8604241510417 00:00:00 00:00:00 0.3533866602834305 13.242335799793306 -1.0 -1.0 -1.0 35.0 888 288 +91 444.686556791516 00:00:00 00:00:00 0.4077832962117002 13.80369034602879 -1.0 -1.0 -1.0 35.0 888 288 +92 562.5781184397683 00:00:00 00:00:00 0.6835005902005468 10.27995831481763 -1.0 -1.0 -1.0 35.0 888 288 +93 167.62653574722364 00:00:00 00:00:00 0.02794717342101584 17.570916297751136 -1.0 -1.0 -1.0 35.0 888 288 +94 481.62428400668705 00:00:00 00:00:00 0.4565401559009469 11.826070143713975 -1.0 -1.0 -1.0 35.0 888 288 +95 475.5794761937849 00:00:00 00:00:00 0.5875361292861063 12.987091653999931 -1.0 -1.0 -1.0 35.0 888 288 +96 295.3711932454349 00:00:00 00:00:00 0.24218053028978936 11.871291318754192 -1.0 -1.0 -1.0 35.0 888 288 +97 1364.3326396162597 00:00:00 00:00:00 1.6079574502539073 10.696711455581303 -1.0 -1.0 -1.0 35.0 888 288 +98 584.9391974583868 00:00:00 00:00:00 0.6295707009501357 10.372529937559637 -1.0 -1.0 -1.0 35.0 888 288 +99 1076.886030040255 00:00:00 00:00:00 1.0553245057067546 14.901426606089192 -1.0 -1.0 
-1.0 35.0 888 288 diff --git a/papers/lsst/Photometric/CRACO/zFrac.ecsv b/papers/lsst/Photometric/CRACO/zFrac.ecsv new file mode 100644 index 00000000..920edc9c --- /dev/null +++ b/papers/lsst/Photometric/CRACO/zFrac.ecsv @@ -0,0 +1,95 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2, "Z_FRACTION": 24.7}, +# "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", +# "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, +# "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01}}'} +TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW +1 616.5738602258931 00:00:00 00:00:00 0.8905487730463858 10.160405042320036 -1.0 -1.0 -1.0 35.0 888 288 +2 946.6052448796315 00:00:00 00:00:00 0.6740538677349737 10.652772181822607 -1.0 -1.0 -1.0 35.0 888 288 +3 586.5727013781997 00:00:00 00:00:00 0.076308758479808 12.415341138097366 -1.0 -1.0 -1.0 35.0 888 288 +4 2153.3992719321486 00:00:00 00:00:00 0.7719105683125631 12.24289806494758 -1.0 -1.0 -1.0 35.0 888 288 +6 529.8895060996854 00:00:00 00:00:00 0.6148674711657728 13.133173041616157 -1.0 -1.0 -1.0 35.0 888 288 +8 412.87460299855934 00:00:00 00:00:00 0.20437591335992997 12.846680716802911 -1.0 -1.0 -1.0 35.0 888 288 +9 701.6217045820609 00:00:00 00:00:00 0.7971964864511405 10.341966201772214 -1.0 -1.0 -1.0 35.0 888 288 +10 205.82201397136845 00:00:00 00:00:00 0.035084600186595774 20.941567140489564 -1.0 -1.0 -1.0 35.0 888 288 +12 275.5937204880448 00:00:00 00:00:00 0.18195765690908733 91.38695892704916 -1.0 -1.0 
-1.0 35.0 888 288 +13 436.349204138444 00:00:00 00:00:00 0.43889114213816116 15.275450861578175 -1.0 -1.0 -1.0 35.0 888 288 +14 295.6406494853049 00:00:00 00:00:00 0.20716845953629162 11.219409175951904 -1.0 -1.0 -1.0 35.0 888 288 +15 291.6471440894715 00:00:00 00:00:00 0.25353674073624666 11.18814447805394 -1.0 -1.0 -1.0 35.0 888 288 +16 1618.8558318442904 00:00:00 00:00:00 0.8938125640216341 24.749670081652773 -1.0 -1.0 -1.0 35.0 888 288 +17 983.9563468218174 00:00:00 00:00:00 0.5198177045244614 12.591584618769005 -1.0 -1.0 -1.0 35.0 888 288 +19 645.2399497353754 00:00:00 00:00:00 0.5654604349809239 10.286082085018421 -1.0 -1.0 -1.0 35.0 888 288 +20 539.5953081699104 00:00:00 00:00:00 0.5580214108898183 35.45318302068964 -1.0 -1.0 -1.0 35.0 888 288 +21 443.6632526190763 00:00:00 00:00:00 0.5313222499329159 14.319328661152863 -1.0 -1.0 -1.0 35.0 888 288 +22 81.45567951589115 00:00:00 00:00:00 0.03213122638515013 15.566080612070119 -1.0 -1.0 -1.0 35.0 888 288 +23 374.91893100970947 00:00:00 00:00:00 0.1090230764363179 29.766613679561118 -1.0 -1.0 -1.0 35.0 888 288 +25 711.626862396593 00:00:00 00:00:00 0.14735854842537668 13.229440860953233 -1.0 -1.0 -1.0 35.0 888 288 +26 356.4667997984884 00:00:00 00:00:00 0.4130361237696566 20.3929051321851 -1.0 -1.0 -1.0 35.0 888 288 +27 405.6849410353128 00:00:00 00:00:00 0.4022419934538231 12.020783061083032 -1.0 -1.0 -1.0 35.0 888 288 +28 438.56039965043755 00:00:00 00:00:00 0.3146015685795931 14.962222736067005 -1.0 -1.0 -1.0 35.0 888 288 +29 1032.0031340800927 00:00:00 00:00:00 1.029340230030751 12.249965125983609 -1.0 -1.0 -1.0 35.0 888 288 +30 217.45075943955328 00:00:00 00:00:00 0.19288729564479787 13.329278119983234 -1.0 -1.0 -1.0 35.0 888 288 +31 337.87773894962976 00:00:00 00:00:00 0.3614030695297573 15.842464362846469 -1.0 -1.0 -1.0 35.0 888 288 +32 747.9605061576287 00:00:00 00:00:00 0.39117198763502004 11.35266012864496 -1.0 -1.0 -1.0 35.0 888 288 +35 163.23187688161033 00:00:00 00:00:00 0.09607781547873054 
78.61411210143966 -1.0 -1.0 -1.0 35.0 888 288 +37 392.6137583987218 00:00:00 00:00:00 0.3061610972555812 16.89212239318249 -1.0 -1.0 -1.0 35.0 888 288 +40 758.3297416501492 00:00:00 00:00:00 0.6073490122867862 12.85406115188814 -1.0 -1.0 -1.0 35.0 888 288 +41 196.42239794857238 00:00:00 00:00:00 0.14294900372874897 36.32216074676754 -1.0 -1.0 -1.0 35.0 888 288 +42 300.9638203550653 00:00:00 00:00:00 0.39351448198009875 13.974445331317181 -1.0 -1.0 -1.0 35.0 888 288 +43 819.1568769824953 00:00:00 00:00:00 0.8261842130673787 10.172610101290998 -1.0 -1.0 -1.0 35.0 888 288 +45 171.9322515506212 00:00:00 00:00:00 0.13935267661456355 34.7413812643017 -1.0 -1.0 -1.0 35.0 888 288 +47 489.25638240698544 00:00:00 00:00:00 0.5445495899312375 16.173416507939116 -1.0 -1.0 -1.0 35.0 888 288 +48 303.6841850164093 00:00:00 00:00:00 0.2974040403504351 10.375992085957462 -1.0 -1.0 -1.0 35.0 888 288 +49 875.3662532129479 00:00:00 00:00:00 0.6489281792071373 10.214362638300399 -1.0 -1.0 -1.0 35.0 888 288 +50 716.6818881006753 00:00:00 00:00:00 0.6543631275867287 14.452157966062835 -1.0 -1.0 -1.0 35.0 888 288 +51 1780.2827105839892 00:00:00 00:00:00 1.3064991817591554 10.3884232109404 -1.0 -1.0 -1.0 35.0 888 288 +52 996.2724896444047 00:00:00 00:00:00 0.5101162519209581 13.213225298728211 -1.0 -1.0 -1.0 35.0 888 288 +54 1126.5413295499866 00:00:00 00:00:00 1.0047522761163254 17.051568734514 -1.0 -1.0 -1.0 35.0 888 288 +55 450.88973429474333 00:00:00 00:00:00 0.3469329170116606 10.69280211941846 -1.0 -1.0 -1.0 35.0 888 288 +57 1489.8174891533963 00:00:00 00:00:00 0.462264715753818 52.449273584894264 -1.0 -1.0 -1.0 35.0 888 288 +61 244.32835931715942 00:00:00 00:00:00 0.18847008443468843 22.08068896327773 -1.0 -1.0 -1.0 35.0 888 288 +62 170.42643162915402 00:00:00 00:00:00 0.15429769730152024 11.891868553016314 -1.0 -1.0 -1.0 35.0 888 288 +64 896.3411776022986 00:00:00 00:00:00 0.8626798876083768 13.956903508254495 -1.0 -1.0 -1.0 35.0 888 288 +65 673.4550793235062 00:00:00 00:00:00 
0.5256410065154973 34.96309565327556 -1.0 -1.0 -1.0 35.0 888 288 +66 409.3504561455062 00:00:00 00:00:00 0.3643567026018635 17.073302923618904 -1.0 -1.0 -1.0 35.0 888 288 +68 455.1932713332237 00:00:00 00:00:00 0.6951890165407291 13.581352455460024 -1.0 -1.0 -1.0 35.0 888 288 +69 212.5080624683044 00:00:00 00:00:00 0.26945046670279904 72.07735525486979 -1.0 -1.0 -1.0 35.0 888 288 +70 208.83082789773542 00:00:00 00:00:00 0.11830405147347496 35.86335125977792 -1.0 -1.0 -1.0 35.0 888 288 +71 218.52612475416436 00:00:00 00:00:00 0.22535984531358233 30.226196813374152 -1.0 -1.0 -1.0 35.0 888 288 +73 218.79115225062986 00:00:00 00:00:00 0.07542156240114564 14.217908248312149 -1.0 -1.0 -1.0 35.0 888 288 +74 618.4593505087528 00:00:00 00:00:00 0.7604740031756679 11.664308801315117 -1.0 -1.0 -1.0 35.0 888 288 +75 652.0130715223407 00:00:00 00:00:00 0.7035573057923941 12.468451895755134 -1.0 -1.0 -1.0 35.0 888 288 +76 673.0857403351645 00:00:00 00:00:00 0.8590470713360423 14.050855469722556 -1.0 -1.0 -1.0 35.0 888 288 +79 656.9337821238685 00:00:00 00:00:00 0.8078859860053357 11.233631650878838 -1.0 -1.0 -1.0 35.0 888 288 +80 1138.704218112882 00:00:00 00:00:00 1.4053123916613104 26.539797617550633 -1.0 -1.0 -1.0 35.0 888 288 +81 620.5653235040173 00:00:00 00:00:00 0.4217561559786075 21.45481401537115 -1.0 -1.0 -1.0 35.0 888 288 +83 552.5443734504142 00:00:00 00:00:00 0.505423229336821 21.543404977178355 -1.0 -1.0 -1.0 35.0 888 288 +84 573.8507253223063 00:00:00 00:00:00 0.4967380348641639 11.084164653890928 -1.0 -1.0 -1.0 35.0 888 288 +85 420.6998183657156 00:00:00 00:00:00 0.45509970537168065 17.32850048985882 -1.0 -1.0 -1.0 35.0 888 288 +86 439.5439082870578 00:00:00 00:00:00 0.24326293577857605 35.833665311539534 -1.0 -1.0 -1.0 35.0 888 288 +88 450.20877052020955 00:00:00 00:00:00 0.2061046297575687 12.665759219918368 -1.0 -1.0 -1.0 35.0 888 288 +89 110.1657516020332 00:00:00 00:00:00 0.025901855591487423 18.200353247030144 -1.0 -1.0 -1.0 35.0 888 288 +90 
443.8604241510417 00:00:00 00:00:00 0.3533866602834305 13.242335799793306 -1.0 -1.0 -1.0 35.0 888 288 +91 444.686556791516 00:00:00 00:00:00 0.4077832962117002 13.80369034602879 -1.0 -1.0 -1.0 35.0 888 288 +92 562.5781184397683 00:00:00 00:00:00 0.6835005902005468 10.27995831481763 -1.0 -1.0 -1.0 35.0 888 288 +93 167.62653574722364 00:00:00 00:00:00 0.02794717342101584 17.570916297751136 -1.0 -1.0 -1.0 35.0 888 288 +94 481.62428400668705 00:00:00 00:00:00 0.4565401559009469 11.826070143713975 -1.0 -1.0 -1.0 35.0 888 288 +95 475.5794761937849 00:00:00 00:00:00 0.5875361292861063 12.987091653999931 -1.0 -1.0 -1.0 35.0 888 288 +96 295.3711932454349 00:00:00 00:00:00 0.24218053028978936 11.871291318754192 -1.0 -1.0 -1.0 35.0 888 288 +98 584.9391974583868 00:00:00 00:00:00 0.6295707009501357 10.372529937559637 -1.0 -1.0 -1.0 35.0 888 288 +99 1076.886030040255 00:00:00 00:00:00 1.0553245057067546 14.901426606089192 -1.0 -1.0 -1.0 35.0 888 288 diff --git a/papers/lsst/Photometric/README.txt b/papers/lsst/Photometric/README.txt new file mode 100644 index 00000000..24726475 --- /dev/null +++ b/papers/lsst/Photometric/README.txt @@ -0,0 +1,25 @@ +The scripts in this folder were written by Bryce Smith, +with minor adaptations by C.W. James. + +The intention is to evaluate the effect of photometric redshifts +on H0 estimation using a simple 1D scan. + +The order of operation is: + +1: python create_fake_surveys.py + +This generates fake surveys for CRACO and MeerTRAP +For each, there are four: base, with mag limit, with photometric smearing, and with both. + +2: run_H0_slice.py + +This runs a slice through H0 over all surveys. Data are saved in directory H0. 
+All eight surveys (MeerTRAP and CRACO) are expected to be run at the same time + +python run_H0_slice.py -n 101 --min=50 --max=100 -f CRACO/Smeared CRACO/zFrac CRACO/Spectroscopic CRACO/Smeared_and_zFrac MeerTRAP/Smeared MeerTRAP/zFrac MeerTRAP/Spectroscopic MeerTRAP/Smeared_and_zFrac + +3: run python plot_h0_slice +generates the figure H0 scan_linear + +4: run python plot_2D_grids.py +Generates plots of the 2D grids for each fake survey diff --git a/papers/lsst/Photometric/create_fake_survey.py b/papers/lsst/Photometric/create_fake_survey.py new file mode 100644 index 00000000..d3e36453 --- /dev/null +++ b/papers/lsst/Photometric/create_fake_survey.py @@ -0,0 +1,223 @@ +import os +from matplotlib import pyplot as plt +from numpy import random +import numpy as np +from matplotlib import pyplot as plt +import importlib.resources as resources + +from astropy.cosmology import Planck18 +from zdm import cosmology as cos +from zdm import figures +from zdm import parameters +from zdm import survey +from zdm import pcosmic +from zdm import iteration as it +from zdm import loading +from zdm import io +from zdm import optical as opt +from zdm import states +from zdm import survey + + +def create_fake_survey(smearing=False,Survey="CRACO"): + """ + Creates four fake survey files + """ + sdir = str(resources.files('zdm').joinpath('data/Surveys')) + + if Survey == "CRACO": + Prefix,Suffix = load_craco_text() + name=['CRAFT_CRACO_900'] + opdir="./CRACO/" # directory to place fake surveys in. Here! + elif Survey == "MeerTRAP": + Prefix,Suffix = load_meertrap_text() + name=['MeerTRAPcoherent'] + opdir="./MeerTRAP/" # directory to place fake surveys in. Here! 
+ else: + raise ValueError("Survey ",Survey," not recognised") + + # make output directories + if not os.path.isdir(opdir): + os.mkdir(opdir) + + #param_dict={'sfr_n': 0.21, 'alpha': 0.11, 'lmean': 2.18, 'lsigma': 0.42, 'lEmax': 41.37, + # 'lEmin': 39.47, 'gamma': -1.04, 'H0': 70.23, 'halo_method': 0, 'sigmaDMG': 0.0, 'sigmaHalo': 0.0,'lC': -7.61} + + # use default state + state=states.load_state(case="HoffmannHalo25",scat=None,rep=None) + + + + ss,gs=loading.surveys_and_grids(survey_names=name,repeaters=False,init_state=state,sdir=sdir) + gs=gs[0] + #gs.state.photo.smearing=smearing + gs.calc_rates() + samples=gs.GenMCSample(100) + zvals=np.zeros(len(samples)) + fp=open(opdir+"Spectroscopic.ecsv","w+") + fp.write(Prefix+Suffix) + fp.write("TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW\n") + for i in range(len(samples)): + fp.write('{0:5}'.format(str(i))) + fp.write('{0:20}'.format(str(samples[i][1]+35))) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:25}'.format(str(samples[i][0]))) + fp.write('{0:25}'.format(str(samples[i][3]*10))) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("35.0")) + fp.write('{0:8}'.format("888")) + fp.write('{0:8}'.format("288")) + fp.write("\n") + fp.close() + + # We now smear the redshift values by the z-error + + if smearing is True: + sigmas=np.array([0.035]) + for sigma in sigmas: + for i in range(len(samples)): + zvals[i]=samples[i][0] + + fp=open(opdir+"Smeared.ecsv","w+") + fp.write(Prefix+', "Z_PHOTO": 0.03'+Suffix) + fp.write("TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW\n") + smear_error=random.normal(loc=0,scale=sigma,size=100) + newvals=zvals+smear_error + for i in range(len(samples)): + fp.write('{0:5}'.format(str(i))) + fp.write('{0:20}'.format(str(samples[i][1]+35))) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:25}'.format(str(newvals[i]))) + 
fp.write('{0:25}'.format(str(samples[i][3]*10))) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("35.0")) + fp.write('{0:8}'.format("888")) + fp.write('{0:8}'.format("288")) + fp.write("\n") + fp.close() + + frac_path = str(resources.files('zdm').joinpath('../papers/lsst/Data')) + + if Survey == "CRACO": + fz=np.load(frac_path+"/fz_24.7.npy")[0:500] + elif Survey == "MeerTRAP": + fz=np.load(frac_path+"/fz_27.5.npy")[0:500] + + zs=np.load(frac_path+"/zvals.npy")[0:500] + + fp=open(opdir+"zFrac.ecsv","w+") + fp1=open(opdir+"Smeared_and_zFrac.ecsv","w+") + + fp.write(Prefix+', "Z_FRACTION": 24.7'+Suffix) + fp1.write(Prefix+', "Z_FRACTION": 24.7, "Z_PHOTO": 0.03'+Suffix) + + fp.write("TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW\n") + fp1.write("TNS DM RA DEC Z SNR WIDTH Gl Gb DMG FBAR BW\n") + for i in range(len(samples)): + prob_thresh=random.rand() + j=np.where(zs>samples[i][0]-0.005)[0][0] + prob=fz[j] + if prob>=prob_thresh: + fp.write('{0:5}'.format(str(i))) + fp.write('{0:20}'.format(str(samples[i][1]+35))) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:10}'.format("00:00:00")) + fp.write('{0:25}'.format(str(samples[i][0]))) + fp.write('{0:25}'.format(str(samples[i][3]*10))) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("-1.0")) + fp.write('{0:8}'.format("35.0")) + fp.write('{0:8}'.format("888")) + fp.write('{0:8}'.format("288")) + fp.write("\n") + + fp1.write('{0:5}'.format(str(i))) + fp1.write('{0:20}'.format(str(samples[i][1]+35))) + fp1.write('{0:10}'.format("00:00:00")) + fp1.write('{0:10}'.format("00:00:00")) + fp1.write('{0:25}'.format(str(samples[i][0]+smear_error[i]))) + fp1.write('{0:25}'.format(str(samples[i][3]*10))) + fp1.write('{0:8}'.format("-1.0")) + fp1.write('{0:8}'.format("-1.0")) + fp1.write('{0:8}'.format("-1.0")) + fp1.write('{0:8}'.format("35.0")) + fp1.write('{0:8}'.format("888")) + 
fp1.write('{0:8}'.format("288")) + fp1.write("\n") + + fp.close() + fp1.close() + + +def load_meertrap_text(): + """ + returns CRACO prefixes and suffixes + """ + + + Prefix="""# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 1,"TOBS": 317.5""" +# we need to split the obs string into two so we can insert the zfraction as required + Suffix="""}, +# "telescope": {"BEAM": "MeerTRAP_coherent_log", +# "BTHRESH": 0.25,"NBEAMS": 1,"NBINS": 5, +# "FRES":0.836,"THRESH":0.069, +# "TRES": 0.30624, "FBAR":1284}}'}\n""" + + return Prefix, Suffix + +def load_craco_text(): + """ + returns CRACO prefixes and suffixes + """ + + + Prefix="""# %ECSV 1.0 +# --- +# datatype: +# - {name: TNS, datatype: string} +# - {name: DM, datatype: float64} +# - {name: RA, datatype: string} +# - {name: DEC, datatype: string} +# - {name: Z, datatype: float64} +# - {name: SNR, datatype: float64} +# - {name: WIDTH, datatype: float64} +# - {name: Gl, unit: deg, datatype: float64} +# - {name: Gb, unit: deg, datatype: float64} +# - {name: DMG, datatype: float64} +# - {name: FBAR, datatype: float64} +# - {name: BW, datatype: float64} +# meta: !!omap +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2""" +# we need to split the obs string into two so we can insert the zfraction as required + Suffix="""}, +# "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", +# "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, +# "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01}}'}\n""" + + 
return Prefix, Suffix + +create_fake_survey(True,Survey="CRACO") +create_fake_survey(True,Survey="MeerTRAP") diff --git a/papers/lsst/Photometric/plot_2dgrids.py b/papers/lsst/Photometric/plot_2dgrids.py new file mode 100644 index 00000000..23897daa --- /dev/null +++ b/papers/lsst/Photometric/plot_2dgrids.py @@ -0,0 +1,89 @@ +""" +Evaluates the likelihood of a slice through H0 for various surveys +""" + +import argparse +import numpy as np +import os + +from zdm import figures +from zdm import iteration as it +from zdm import loading +from zdm import parameters +from zdm import repeat_grid as zdm_repeat_grid +from zdm import MCMC +from zdm import survey +from zdm import states +from astropy.cosmology import Planck18 +import importlib.resources as resources +from numpy import random +import matplotlib.pyplot as plt +import time + +def main(Survey="CRACO",zmax=3,DMmax=3000): + """ + Plots 2D zDM grids + """ + # Set state + state=states.load_state(case="HoffmannHalo25",scat=None,rep=None) + sdir = resources.files('zdm').joinpath('../papers/lsst/Photometric') + names = ["Spectroscopic","Smeared","zFrac","Smeared_and_zFrac"] + for i,name in enumerate(names): + names[i] = Survey+"/"+name + xlabels = ["$z_{\\rm spec}$","$z_{\\rm photo}$","$z_{\\rm spec}$","$z_{\\rm photo}$"] + ss,gs = loading.surveys_and_grids( + survey_names=names,repeaters=False,init_state=state,sdir=sdir) + plot_grids(gs,ss,"./",xlabels,DMmax=DMmax,zmax=zmax) + + +#============================================================================== +""" +Function: plot_grids +Date: 10/01/2024 +Purpose: + Plot grids. 
Adapted from zdm/scripts/plot_pzdm_grid.py + +Imports: + grids = list of grids + surveys = list of surveys + outdir = output directory + val = parameter value for this grid +""" +def plot_grids(grids, surveys, outdir,xlabels,zmax=3,DMmax=3000): + for i,g in enumerate(grids): + s = surveys[i] + zvals=[] + dmvals=[] + nozlist=[] + + if s.zlist is not None: + for iFRB in s.zlist: + zvals.append(s.Zs[iFRB]) + dmvals.append(s.DMEGs[iFRB]) + if s.nozlist is not None: + for dm in s.DMEGs[s.nozlist]: + nozlist.append(dm) + + frbzvals = np.array(zvals) + frbdmvals = np.array(dmvals) + + figures.plot_grid( + g.rates, + g.zvals, + g.dmvals, + name=outdir + s.name + "_zDM.png", + norm=3, + log=True, + label="$\\log_{10} p({\\rm DM}_{\\rm EG},z)$ [a.u.]", + xlabel=xlabels[i], + project=False, + FRBDMs=frbdmvals, + FRBZs=frbzvals, + Aconts=[0.01, 0.1, 0.5], + zmax=zmax, + DMmax=DMmax, + # DMlines=nozlist, + ) + +main(Survey="CRACO") +main(Survey="MeerTRAP",zmax=4,DMmax=4000) diff --git a/papers/lsst/Photometric/plot_H0_slice.py b/papers/lsst/Photometric/plot_H0_slice.py new file mode 100644 index 00000000..d592174b --- /dev/null +++ b/papers/lsst/Photometric/plot_H0_slice.py @@ -0,0 +1,119 @@ +""" +Generates a plot of previously calculated slice through H0 +""" + +import argparse +import numpy as np +import os + +from zdm import figures +from zdm import iteration as it + +from zdm import parameters +from zdm import repeat_grid as zdm_repeat_grid +from zdm import MCMC +from zdm import survey +from zdm import states +from astropy.cosmology import Planck18 + +from numpy import random +import matplotlib.pyplot as plt +import time + +import matplotlib +defaultsize=18 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + +def main(istart=0): + """ + Main routine to create plots and extract characteristic parameters + + istart tells us which count to begin on + """ + ll_lists=np.load("H0/ll_lists.npy") + vals = 
np.load("H0/h0vals.npy") + + nh,ns = ll_lists.shape + ns = 4 # hard-code to plot CRACO only! + + plt.figure() + linestyles=["-","--",":","-."] + # up to the user to get thius order right! Use e.g. + # python run_H0_slice.py -n 10 --min=50 --max=100 -f Spectroscopic Smeared zFrac Smeared_and_zFrac + + s_names=["All hosts, spec-zs","$\\sigma_z=0.035$","$m_r^{\\rm lim}=24.7$","$\\sigma_z=0.035,~m_r^{\\rm lim}=24.7$"] + plt.clf() + llsum = np.zeros(ll_lists.shape[0]) + FWHM=[] + for i in np.arange(ns): + + lls = ll_lists[:, i+istart] + + lls[lls < -1e10] = -np.inf + lls[np.argwhere(np.isnan(lls))] = -np.inf + + llsum += lls + + lls = lls - np.max(lls) + lls=10**lls + index1=np.where(lls>=0.5)[0][0] + index2=np.where(lls>=0.5)[0][-1] + + root1 = vals[index1-1] * (lls[index1]-0.5)/(lls[index1]-lls[index1-1])\ + + vals[index1] * (0.5-lls[index1-1])/(lls[index1]-lls[index1-1]) + + root2 = vals[index2+1] * (lls[index2]-0.5)/(lls[index2]-lls[index2+1])\ + + vals[index2] * (0.5-lls[index2+1])/(lls[index2]-lls[index2+1]) + + + #root1=vals[index1-1]-(0.5-lls[index1-1])*(vals[index1]-vals[index1-1])/(lls[index1]-lls[index1-1]) + #root2=vals[index2]-(0.5-lls[index2])*(vals[index2+1]-vals[index2])/(lls[index2+1]-lls[index2]) + FWHM.append(root2-root1) + # plt.figure() + # plt.clf() + plt.plot(vals, lls, label=s_names[i],ls=linestyles[i]) + plt.xlabel('$H_0$ (km s$^{-1}$ Mpc$^{-1}$)',fontsize=14) + plt.ylabel('$\\frac{\\mathcal{L}}{max(\\mathcal{L})}$',fontsize=18) + # plt.savefig(os.path.join(outdir, s.name + ".pdf")) + #print("Max H0:",vals[np.where(lls==1.0)[0][0]]) + + plt.minorticks_on() + plt.tick_params(axis='y', which='major', labelsize=14) # To set tick label fontsize + plt.tick_params(axis='y', which='major', length=9) # To set tick size + plt.tick_params(axis='y', which='minor', length=4.5) # To set tick size + plt.tick_params(axis='y', which='both',direction='in',right='on', top='on') + + plt.tick_params(axis='x', which='major', labelsize=14) # To set tick label 
fontsize + plt.tick_params(axis='x', which='major', length=9) # To set tick size + plt.tick_params(axis='x', which='minor', length=4.5) # To set tick size + plt.tick_params(axis='x', which='both',direction='in',right='on', top='on') + + #peak=vals[np.argwhere(llsum == np.max(llsum))[0]] + plt.xlim(60,90) + plt.ylim(0,1) + + + plt.plot([70.63,70.63],[0,1],color="black",linestyle=":") + plt.text(69,0.06,"$H_0^{\\rm sim}$",rotation=90,fontsize=14) + + #plt.axvline(peak,ls='--') + #plt.legend(loc='upper left') + plt.legend(fontsize=10) + plt.tight_layout() + if istart==0: + plt.savefig("H0_scan_linear.png") + else: + plt.savefig("MeerKAT_H0_scan_linear.png") + percentage=(FWHM/FWHM[0]-1)*100 + for i,name in enumerate(s_names): + print(name," FWHM is ",FWHM[i]," frac is ",percentage[i]) + #print("FWHM:Spectroscopic,Photometric,zFrac,Photometric+zfrac\n",FWHM,percentage) + +print("Results for CRACO") +main(istart=0) +print("\n\nResults for MeerTRAP") +main(istart=4) diff --git a/papers/lsst/Photometric/run_H0_slice.py b/papers/lsst/Photometric/run_H0_slice.py new file mode 100644 index 00000000..8c724fa8 --- /dev/null +++ b/papers/lsst/Photometric/run_H0_slice.py @@ -0,0 +1,100 @@ +""" +Evaluates the likelihood of a slice through H0 for various surveys +""" + +import argparse +import numpy as np +import os + +from zdm import figures +from zdm import iteration as it + +from zdm import parameters +from zdm import repeat_grid as zdm_repeat_grid +from zdm import MCMC +from zdm import survey +from zdm import states +from astropy.cosmology import Planck18 + +from numpy import random +import matplotlib.pyplot as plt +import time + +def main(): + """ + run with: + python run_H0_slice.py -n 10 --min=50 --max=100 -f CRACO/Smeared CRACO/zFrac CRACO/Spectroscopic CRACO/Smeared_and_zFrac MeerTRAP/Smeared MeerTRAP/zFrac MeerTRAP/Spectroscopic MeerTRAP/Smeared_and_zFrac + + """ + t0 = time.time() + parser = argparse.ArgumentParser() + 
#parser.add_argument(dest='param',type=str,help="Parameter to do the slice in") + parser.add_argument('--min',type=float,help="Min value") + parser.add_argument('--max',type=float,help="Max value") + parser.add_argument('-f', '--files', default=None, nargs='+', type=str, help="Survey file names") + parser.add_argument('-n',dest='n',type=int,default=50,help="Number of values") + # parser.add_argument('-r',dest='repeaters',default=False,action='store_true',help="Surveys are repeater surveys") + args = parser.parse_args() + + vals = np.linspace(args.min, args.max, args.n) + + # Set state + state=states.load_state(case="HoffmannHalo25",scat=None,rep=None) + + grid_params = {} + grid_params['dmmax'] = 7000.0 + grid_params['ndm'] = 1400 + grid_params['nz'] = 500 + ddm = grid_params['dmmax'] / grid_params['ndm'] + dmvals = (np.arange(grid_params['ndm']) + 1) * ddm + + # Initialise surveys + surveys = [] + if args.files is not None: + for survey_name in args.files: + print("Loading survey ",survey_name) + s = survey.load_survey(survey_name, state, dmvals,sdir="./") + surveys.append(s) + + + # state.update_param('halo_method', 1) + # state.update_param(args.param, vals[0]) + + outdir = 'H0/' + if not os.path.exists(outdir): + os.makedirs(outdir) + + ll_lists = [] + for val in vals: + print("val:", val) + param = {"H0": {'min': -np.inf, 'max': np.inf}} + ll=0 + ll_list=[] + sll, sll_list = MCMC.calc_log_posterior([val], state, param,[surveys,[]], grid_params, ind_surveys=True)#,psnr=True) + + ll_lists.append(sll_list) + + + ll_lists = np.asarray(ll_lists) + np.save(outdir+"ll_lists.npy",ll_lists) + np.save(outdir+"h0vals.npy",vals) + +#============================================================================== +""" +Function: commasep +Date: 23/08/2022 +Purpose: + Turn a string of variables seperated by commas into a list + +Imports: + s = String of variables + +Exports: + List conversion of s +""" +def commasep(s): + return list(map(str, s.split(','))) + 
+#============================================================================== + +main() diff --git a/papers/lsst/Photometric/run_slice.py b/papers/lsst/Photometric/run_slice.py new file mode 100644 index 00000000..e804c985 --- /dev/null +++ b/papers/lsst/Photometric/run_slice.py @@ -0,0 +1,229 @@ +import argparse +import numpy as np +import os + +from zdm import figures +from zdm import iteration as it + +from zdm import parameters +from zdm import repeat_grid as zdm_repeat_grid +from zdm import MCMC +from zdm import survey +from astropy.cosmology import Planck18 + +from numpy import random +import matplotlib.pyplot as plt +import time + + +def main(): + """ + Run with: + python run_H0_slice.py -n 91 --min=60 --max=90 -f CRACO/Spectroscopic CRACO/Smeared CRACO/zFrac CRACO/Smeared_and_zFrac MeerTRAP/Spectroscopic MeerTRAP/Smeared MeerTRAP/zFrac MeerTRAP/Smeared_and_zFrac + """ + + t0 = time.time() + parser = argparse.ArgumentParser() + parser.add_argument(dest='param',type=str,help="Parameter to do the slice in") + parser.add_argument(dest='min',type=float,help="Min value") + parser.add_argument(dest='max',type=float,help="Max value") + parser.add_argument('-f', '--files', default=None, nargs='+', type=str, help="Survey file names") + parser.add_argument('-s', '--smeared_surveys', default=None, nargs='+', type=str, help="Surveys with smeared z-vals") + parser.add_argument('-n',dest='n',type=int,default=50,help="Number of values") + # parser.add_argument('-r',dest='repeaters',default=False,action='store_true',help="Surveys are repeater surveys") + args = parser.parse_args() + + vals = np.linspace(args.min, args.max, args.n) + + # Set state + state = parameters.State() + state.set_astropy_cosmo(Planck18) + # param_dict={'sfr_n': 1.13, 'alpha': 1.5, 'lmean': 2.27, 'lsigma': 0.55, + # 'lEmax': 41.26, 'lEmin': 39.5, 'gamma': -0.95, 'H0': 73, + # 'min_lat': 0.0, 'sigmaDMG': 0.0, 'sigmaHalo': 20.0} + # param_dict={'sfr_n': 0.8808527057055584, 'alpha': 
0.7895161131856694, + # 'lmean': 2.1198711983468064, 'lsigma': 0.44944780033763343, + # 'lEmax': 41.18671139482926, 'lEmin': 39.81049090314043, 'gamma': -1.1558450520609953, + # 'H0': 54.6887137195215, 'halo_method': 0, 'sigmaDMG': 0.0, 'sigmaHalo': 0.0, 'min_lat': 30.0} + param_dict={'sfr_n': 1.7294049204398037, 'alpha': 1.4859524003747502, + 'lmean': 2.3007428869522486, 'lsigma': 0.396300210604263, + 'lEmax': 41.0, 'lEmin': 38.35533894604933, 'gamma': 0.6032500201815869, + 'H0': 70.51322705185869, 'DMhalo': 39.800465306883666} + # param_dict={'lEmax': 40.578551786703116} + state.update_params(param_dict) + + state.update_param('Rgamma', -2.2) + state.update_param('lRmax', 3.0) + state.update_param('lRmin', -4.0) + #state.update_param('min_lat', 30.0) + #smeared state##################### + s_state=parameters.State() + s_state.set_astropy_cosmo(Planck18) + s_state.update_params(param_dict) + s_state.update_param('Rgamma',-2.2) + s_state.update_param('lRmax', 3.0) + s_state.update_param('lRmin', -4.0) + #################################3 + + + # Initialise surveys + surveys_sep = [[], []] + s_surveys_sep=[[],[]] + + grid_params = {} + grid_params['dmmax'] = 7000.0 + grid_params['ndm'] = 1400 + grid_params['nz'] = 500 + ddm = grid_params['dmmax'] / grid_params['ndm'] + dmvals = (np.arange(grid_params['ndm']) + 1) * ddm + + if args.files is not None: + for survey_name in args.files: + s = survey.load_survey(survey_name, state, dmvals) + surveys_sep[0].append(s) + + if args.smeared_surveys is not None: + for survey_name in args.smeared_surveys: + s_state.photo.smearing=True + s_state.photo.sigma=float(survey_name) + s = survey.load_survey(survey_name, s_state, dmvals) + s_surveys_sep[0].append(s) + t1 = time.time() + print("Step 1: ", str(t1-t0), flush=True) + + # state.update_param('halo_method', 1) + # state.update_param(args.param, vals[0]) + + outdir = 'cube/' + args.param + '/' + if not os.path.exists(outdir): + os.makedirs(outdir) + + ll_lists = [] + for val in 
vals: + print("val:", val) + param = {args.param: {'min': -np.inf, 'max': np.inf}} + ll, ll_list = MCMC.calc_log_posterior([val], state, param, surveys_sep, grid_params, ind_surveys=True)#,psnr=True) + for i, surveys in enumerate(args.smeared_surveys): + s_state.photo.sigma=float(surveys) + ss_surveys_sep=[[],[]] + ss_surveys_sep[0].append(s_surveys_sep[0][i]) + sll, sll_list = MCMC.calc_log_posterior([val], s_state, param, ss_surveys_sep, grid_params, ind_surveys=True)#,psnr=True) + for s in sll_list: + ll_list.append(s) + ll+=sll + print(ll, ll_list) + ll_lists.append(ll_list) + t2 = time.time() + print("Step 2: ", str(t2-t1), flush=True) + t1 = t2 + print(ll_lists) + ll_lists = np.asarray(ll_lists) + + plt.figure() + plt.clf() + llsum = np.zeros(ll_lists.shape[0]) + surveys = surveys_sep[0] + surveys_sep[1]+s_surveys_sep[0] + for i in range(len(surveys)): + s = surveys[i] + lls = ll_lists[:, i] + + lls[lls < -1e10] = -np.inf + lls[np.argwhere(np.isnan(lls))] = -np.inf + + llsum += lls + + lls = lls - np.max(lls) + #lls=10**lls + # plt.figure() + # plt.clf() + plt.plot(vals, lls, label=s.name) + plt.xlabel(args.param) + plt.ylabel('log likelihood') + # plt.savefig(os.path.join(outdir, s.name + ".pdf")) + + print(vals) + print(llsum) + peak=vals[np.argwhere(llsum == np.max(llsum))[0]] + print("peak", peak) + #plt.axvline(peak,ls='--') + plt.legend() + plt.savefig(outdir + args.param + ".pdf") + + # llsum = llsum - np.max(llsum) + # llsum[llsum < -1e10] = -np.inf + plt.figure() + plt.clf() + plt.plot(vals, llsum, label='Total') + plt.axvline(peak,ls='--') + # plt.plot(vals, llsum2) + plt.xlabel(args.param) + plt.ylabel('log likelihood') + plt.legend() + plt.savefig(outdir + args.param + "_sum.pdf") + +#============================================================================== +""" +Function: plot_grids +Date: 10/01/2024 +Purpose: + Plot grids. 
Adapted from zdm/scripts/plot_pzdm_grid.py + +Imports: + grids = list of grids + surveys = list of surveys + outdir = output directory + val = parameter value for this grid +""" +def plot_grids(grids, surveys, outdir, val): + for g,s in zip(grids, surveys): + zvals=[] + dmvals=[] + nozlist=[] + + if s.zlist is not None: + for iFRB in s.zlist: + zvals.append(s.Zs[iFRB]) + dmvals.append(s.DMEGs[iFRB]) + if s.nozlist is not None: + for dm in s.DMEGs[s.nozlist]: + nozlist.append(dm) + + frbzvals = np.array(zvals) + frbdmvals = np.array(dmvals) + + figures.plot_grid( + g.rates, + g.zvals, + g.dmvals, + name=outdir + s.name + "_" + str(val) + ".pdf", + norm=3, + log=True, + label="$\\log_{10} p({\\rm DM}_{\\rm EG},z)$ [a.u.]", + project=False, + FRBDM=frbdmvals, + FRBZ=frbzvals, + Aconts=[0.01, 0.1, 0.5], + zmax=1.5, + DMmax=3000, + # DMlines=nozlist, + ) + +#============================================================================== +""" +Function: commasep +Date: 23/08/2022 +Purpose: + Turn a string of variables seperated by commas into a list + +Imports: + s = String of variables + +Exports: + List conversion of s +""" +def commasep(s): + return list(map(str, s.split(','))) + +#============================================================================== + +main() diff --git a/papers/lsst/galaxies.py b/papers/lsst/galaxies.py new file mode 100644 index 00000000..e19be4f0 --- /dev/null +++ b/papers/lsst/galaxies.py @@ -0,0 +1,75 @@ +""" +File containing routines to read in host galaxy data +""" + +import numpy as np +import pandas as pd + +def read_meerkat(): + """ + returns z and mr data from Pastor-Morales et al + https://arxiv.org/pdf/2507.05982 + Detection method provided in private communication (Pastor-Morales) + """ + + data=np.loadtxt("Data/meerkat_mr.txt",comments='#') + z=data[:,2] + mr = data[:,3] + loc = data[:,4] # 1 is coherent beam, 0 incoherent only + z = np.abs(z) # -ve is + w = data[:,5] #PO|x + + # removes incoherent sum data + good = 
np.where(loc==1)[0] + z=z[good] + loc=loc[good] + mr=mr[good] + w = w[good] + + # removes missing data + good = np.where(z != 9999) + z = z[good] + loc=loc[good] + mr=mr[good] + w=w[good] + + return z,mr,w + +def convert_craft(): + """ + CRAFT ICS data + """ + + import pandas as pd + DF = pd.read_csv("Data/CRAFT_ICS_HTR_Catalogue1.csv") + + DF2 = pd.DataFrame(DF["TNS"]) + DF2["z"] = DF["Z"] + DF2.to_csv("Data/temp_craft_hosts.csv",index=False) + +def read_craft(): + """ + CRAFT ICS data + """ + + DF = pd.read_csv("Data/craft_ics_hosts.csv") + + z = np.array(DF["z"]) + mr = np.array(DF["mr"]) + nfrb = len(mr) + w = np.full([nfrb],1.) # artificial, but all are highy confidence + return z,mr,w + + + +def read_dsa(): + """ + Reads in DSA data from sharma et al + """ + DF = pd.read_csv("Data/dsa_hosts.csv") + + z = np.array(DF["z"]) + mr = np.array(DF["mr"]) + nfrb = len(mr) + w = np.array(DF["phost"]) # only gives most likely hosts + return z,mr,w diff --git a/papers/lsst/make_host_z_mag_plot.py b/papers/lsst/make_host_z_mag_plot.py new file mode 100644 index 00000000..b0237f42 --- /dev/null +++ b/papers/lsst/make_host_z_mag_plot.py @@ -0,0 +1,170 @@ +""" +Plots the expected magnitude-z relation for various FRB host galaxy models + +Adds points corresponding to known hosts + +We actually do this for best-fitting models from the PATH paper + +""" + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt + +import galaxies as g + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +import galaxies as g +import astropath.priors as pathpriors +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + +def make_zmr_plots(): + """ + Loops over all ICS FRBs + """ + + # loops over 
different FRB host models + opdir="zmr/" + if not os.path.exists(opdir): + os.mkdir(opdir) + + # model 1: simple model + opstate = op.OpticalState() + # sets optical state to use simple linear interpolation + opstate.simple.AbsModelID = 1 # linear interpolation + opstate.simple.AppModelID = 1 # include k-correction + opstate.simple.NModelBins = 6 + opstate.simple.Absmin = -25 + opstate.simple.Absmax = -15 + model1 = opt.simple_host_model(opstate) + xbest = np.load("../pathpriors/simple_output/best_fit_params.npy") + model1.init_args(xbest) + + + # this is from an initial estimate. Currently, no way to enter this into the opstate. To do. + + #loudas model with SFR=0 + model2=opt.loudas_model() + model2.init_args(0.) + + # loudas model with SFR=1 + model3=opt.loudas_model() + model3.init_args(1.) + + xbest = np.load("../pathpriors/loudas_output/best_fit_params.npy") + model4=opt.loudas_model() + model4.init_args(xbest) + + # model from Lachlan + model5=opt.marnoch_model() + + models=[model1,model2,model3,model4,model5] + labels=["naive","sfr0","sfr1","loudas","marnoch"] + labels2=["(c) Naive","sfr0","sfr1","(b) Loudas25","(a) Marnoch23"] + + for i,model in enumerate(models): + opfile = opdir+labels[i]+"_zmr.png" + + make_host_plot(model,labels2[i],opfile) + + +def make_host_plot(model,label,opfile): + """ + generates a plot showing the magnitude and redshift of a bunch of FRB host galaxies + + Args: + model: optical model class instance + opfile: string labelling the plotfile + """ + + nz=50 + zmax=2 + zmin = zmax/nz + zvals = np.linspace(zmin,zmax,nz) + mrbins = np.linspace(0,40,401) + mrvals = (mrbins[1:] + mrbins[:-1])/2. 
+ + medians = np.zeros([nz]) + sig1ds = np.zeros([nz]) + sig1us = np.zeros([nz]) + sig2ds = np.zeros([nz]) + sig2us = np.zeros([nz]) + + for i,z in enumerate(zvals): + pmr = model.get_pmr_gz(mrbins,z) + + cpmr = np.cumsum(pmr) + cpmr /= cpmr[-1] + median = np.where(cpmr > 0.5)[0][0] + sig1d = np.where(cpmr < 0.165)[0][-1] + sig1u = np.where(cpmr > 0.835)[0][0] + sig2d = np.where(cpmr < 0.025)[0][-1] + sig2u = np.where(cpmr > 0.975)[0][0] + + medians[i] = mrvals[median] + sig1ds[i] = mrvals[sig1d] + sig1us[i] = mrvals[sig1u] + sig2ds[i] = mrvals[sig2d] + sig2us[i] = mrvals[sig2u] + + plt.figure() + plt.plot(zvals,medians,linestyle="-",color="red",label="Mean $m_r$") + plt.plot(zvals,sig1ds,linestyle="--",color=plt.gca().lines[-1].get_color(),label="67% C.I.") + plt.plot(zvals,sig1us,linestyle="--",color=plt.gca().lines[-1].get_color()) + plt.plot(zvals,sig2ds,linestyle=":",color=plt.gca().lines[-1].get_color(),label="95% C.I.") + plt.plot(zvals,sig2us,linestyle=":",color=plt.gca().lines[-1].get_color()) + plt.xlabel("z") + plt.ylabel("$m$") + + z,mr,w = g.read_craft() + OK = np.where(w>= 0.5)[0] + plt.scatter(z[OK],mr[OK],marker='d',label="CRAFT ICS",s=20) + + z,mr,w = g.read_meerkat() + OK = np.where(w>= 0.5)[0] + plt.scatter(z[OK],mr[OK],marker='+',label="MeerTRAP coherent",s=20) + + z,mr,w = g.read_dsa() + OK = np.where(w>= 0.5)[0] + plt.scatter(z[OK],mr[OK],marker='o',label="DSA",s=20) + + zmax=2 + plt.ylim(10,30) + plt.xlim(0,zmax) + + print("label is ",label) + plt.text(0.04,29,label) + + #Rlim1=24.7 + #Rlim2=27.5 + #plt.plot([0,zmax],[Rlim1,Rlim1],linestyle=":",color="black") + #plt.plot([0,zmax],[Rlim2,Rlim2],linestyle=":",color="black") + #plt.text(0.1,Rlim1+0.2,"$m_r^{\\rm lim}=$"+str(Rlim1)) + #plt.text(0.1,Rlim2+0.2,"$m_r^{\\rm lim}=$"+str(Rlim2)) + + plt.legend() + plt.tight_layout() + plt.savefig(opfile) + plt.close() + + +if __name__ == "__main__": + + make_zmr_plots() + + + diff --git a/papers/lsst/meerkat_mr.txt b/papers/lsst/meerkat_mr.txt deleted 
file mode 100644 index 327c8558..00000000 --- a/papers/lsst/meerkat_mr.txt +++ /dev/null @@ -1,13 +0,0 @@ -# r-band magnitudes of FRB hosts from Table B1 of Pastor-Morales et al https://arxiv.org/pdf/2507.05982 -# FRB zspec mr loc -20220222 0.853 23.86 -20220224 0.6271 21.63 -20230125 0.3265 22.12 -20230503 -0.32 20.11 -20230613 0.3923 20.132 -20230827 0 22.03 -20230827 0 25.7 -20230907 0.4638 19.83 -20231010 -0.61 21.24 -20231020 0.4775 0.46 -20231210 -0.5 21.19 diff --git a/papers/lsst/sim_pz.py b/papers/lsst/sim_pz.py index f3051c0b..9f5afc52 100644 --- a/papers/lsst/sim_pz.py +++ b/papers/lsst/sim_pz.py @@ -27,6 +27,8 @@ from scipy.interpolate import CubicSpline from scipy import stats import matplotlib +import importlib.resources as resources + defaultsize=18 ds=4 @@ -37,35 +39,46 @@ #r-band limits 24.7, 27.5(single visit, 10 year, these are 5 sigma limits) -def main(opdir="Data/"): +def main(): + + plotdir="Plots/" + opdir="Data/" + optdir = str(resources.files('zdm').joinpath('data/optical'))+"/" - meerkat_z,meerkat_mr = read_meerkat() + meerkat_z,meerkat_mr,meerkat_w = read_meerkat() - Load=True + # we should re-do this shortly. 
+ Load=False repeaters=False + Test=False # do this for very simplified data + Scat=False # do not use updated scattering model Rlim0 = 19.8 # existing magnitude limits Rlim1 = 24.7 Rlim2 = 27.5 + Rlim3 = 23.0 #decals + names=['CRAFT_CRACO_1300','MeerTRAPcoherent','SKA_mid'] - labels=["ASKAP CRACO", "MeerKAT/SKA-Mid","SKA-Mid"] + labels=["ASKAP CRACO", "MeerKAT","SKA-Mid"] prefixes=["CRACO","MeerTRAP","SKA_Mid"] linestyles = ["-","--",":"] imax=2 # because SKA and mid are so similar + if not os.path.exists(plotdir): + os.mkdir(plotdir) if not os.path.exists(opdir): os.mkdir(opdir) - + Rs,Rrmss,Rzvals,sbar,srms = process_rbands() plot_R(Rs,Rrmss,Rzvals,sbar,srms,opdir,Rlim1,Rlim2) - + if not Load: #gs,ss = get_surveys_grids(names,opdir,repeaters=True,Test=False) - ss,gs = get_surveys_grids(names,opdir,repeaters=False,Test=False) + ss,gs = get_surveys_grids(names,opdir,repeaters=repeaters,Test=Test,Scat=Scat) - plot_efficiencies(gs,ss,opdir,prefixes) + plot_efficiencies(gs,ss,opdir,prefixes,Test,Scat) plot_beams(ss,labels,opdir) # plots telescope efficiencies at z=0 @@ -85,9 +98,11 @@ def main(opdir="Data/"): fz0 = np.zeros([nz]) fz1 = np.zeros([nz]) fz2 = np.zeros([nz]) + fz3 = np.zeros([nz]) iz0 = np.where(Rbars < Rlim0)[-1] iz1 = np.where(Rbars < Rlim1)[-1] iz2 = np.where(Rbars < Rlim2)[-1] + iz3 = np.where(Rbars < Rlim3)[-1] for i,z in enumerate(zvals): if z < Rzvals[0]: @@ -108,32 +123,38 @@ def main(opdir="Data/"): fz0[i] = norm.cdf(Rlim0) fz1[i] = norm.cdf(Rlim1) fz2[i] = norm.cdf(Rlim2) - np.save(opdir+"fz_19.8.npy",fz0) - np.save(opdir+"fz_24.7.npy",fz1) - np.save(opdir+"fz_27.5.npy",fz2) + fz3[i] = norm.cdf(Rlim3) + np.save(optdir+"fz_19.8.npy",fz0) + np.save(optdir+"fz_24.7.npy",fz1) + np.save(optdir+"fz_27.5.npy",fz2) + np.save(optdir+"fz_23.0.npy",fz3) + np.save(opdir+"Rhist.npy",Rhist) np.save(opdir+"Rvals.npy",Rvals) np.save(opdir+"Rbars.npy",Rbars) else: - fz0 = np.load(opdir+"fz_19.8.npy") - fz1 = np.load(opdir+"fz_24.7.npy") - fz2 = 
np.load(opdir+"fz_27.5.npy") + fz0 = np.load(optdir+"fz_19.8.npy") + fz1 = np.load(optdir+"fz_24.7.npy") + fz2 = np.load(optdir+"fz_27.5.npy") Rhist = np.load(opdir+"Rhist.npy") Rvals = np.load(opdir+"Rvals.npy") Rbars = np.load(opdir+"Rbars.npy") - plt.figure() plt.xlabel("z") - plt.ylabel("$f_{m_r}$") - plt.plot(zvals,fz1,label="$f_{24.7}$") - plt.plot(zvals,fz2,label="$f_{27.5}$") + plt.ylabel("fraction visible") + plt.plot(zvals,fz1,label="$m_{r}^{\\rm lim}=24.7$") + plt.plot(zvals,fz2,label="$m_{r}^{\\rm lim}=27.5$",linestyle="--") + plt.ylim(0,1.05) + plt.xlim(0,6) plt.legend() plt.tight_layout() - plt.savefig(opdir+"fraction_visible.png") + plt.savefig(plotdir+"fraction_visible.png") plt.close() - DPplot(zvals,[fz1],["$m_r = 24.7$"],opdir + "DP_fraction_visible.png",color="orange") + + + DPplot(zvals,[fz1],["$m_r = 24.7$"],plotdir + "DP_fraction_visible.png",color="orange") ####### p(z) plot ##### plt.figure() @@ -146,7 +167,7 @@ def main(opdir="Data/"): plt.xlabel("$m_r$") plt.ylabel("$p(m_r)$ [a.u.]") ax2 = plt.gca() - + imax=2 for i,prefix in enumerate(prefixes): if i==imax: break @@ -176,6 +197,9 @@ def main(opdir="Data/"): pz2 = pz*fz2 np.save(opdir+"zvals.npy",g.zvals) + np.save(optdir+"z_19.8.npy",g.zvals) + np.save(optdir+"z_24.7.npy",g.zvals) + np.save(optdir+"z_27.5.npy",g.zvals) np.save(opdir+prefixes[i]+"_pz.npy",pz) np.save(opdir+prefixes[i]+"_fpz0.npy",pz0) np.save(opdir+prefixes[i]+"_fpz1.npy",pz1) @@ -187,9 +211,9 @@ def main(opdir="Data/"): print(i,"Norm is ",norm) plt.plot(zvals,pz/norm,label=labels[i],linestyle="-") - plt.plot(zvals,pz0/norm,linestyle="-.",color=plt.gca().lines[-1].get_color()) - plt.plot(zvals,pz1/norm,linestyle="--",color=plt.gca().lines[-1].get_color()) - plt.plot(zvals,pz2/norm,linestyle=":",color=plt.gca().lines[-1].get_color()) + #plt.plot(zvals,pz0/norm,linestyle="-.",color=plt.gca().lines[-1].get_color()) + plt.plot(zvals,pz1/norm,linestyle="--",color=plt.gca().lines[-1].get_color(),label=' single visit') + 
plt.plot(zvals,pz2/norm,linestyle=":",color=plt.gca().lines[-1].get_color(),label=' 10 yr co-adds') print("For survey ",prefixes[i]," number of FRBs will be ",np.sum(pz),np.sum(pz0),np.sum(pz1),\ np.sum(pz2),np.sum(pz0)/np.sum(pz),np.sum(pz1)/np.sum(pz),np.sum(pz2)/np.sum(pz)) @@ -203,15 +227,19 @@ def main(opdir="Data/"): plt.sca(ax2) norm = np.max(mag_hist) - plt.plot(Rbars,mag_hist/norm,label=labels[i]) + plt.plot(Rbars,mag_hist/norm,label=labels[i],linestyle=linestyles[i]) + + if prefix == "MeerTRAP": + mtmh = mag_hist + mtpz = pz plt.sca(ax1) plt.ylim(0,1.02) - plt.xlim(0,6) - plt.legend() + plt.xlim(0,5) + plt.legend(fontsize=16) plt.tight_layout() - plt.savefig(opdir+"lsst_pz.png") + plt.savefig(plotdir+"lsst_pz.png") plt.close() plt.sca(ax2) @@ -220,25 +248,83 @@ def main(opdir="Data/"): #meerkat_z,meerkat_mr mrbins=np.linspace(10,30,21) - weights = np.full([11],0.25) + nfrb = len(meerkat_mr) + - plt.hist(meerkat_mr,bins=mrbins,weights=weights,label="MK 2023 data") plt.plot([Rlim1,Rlim1],[0,1],linestyle=":",color="black") plt.plot([Rlim2,Rlim2],[0,1],linestyle=":",color="black") - plt.text(Rlim1+0.1,0.1,"$m_r=$"+str(Rlim1),rotation=90) - plt.text(Rlim2+0.1,0.1,"$m_r=$"+str(Rlim2),rotation=90) + plt.text(Rlim1-1.5,0.1,"$m_r^{\\rm lim}=$"+str(Rlim1),rotation=90) + plt.text(Rlim2-1.5,0.1,"$m_r^{\\rm lim}=$"+str(Rlim2),rotation=90) + #plt.legend(loc="upper left") + plt.legend(fontsize=14) + plt.tight_layout() + plt.savefig(plotdir+"lsst_pR.png") + + + plt.hist(meerkat_mr,bins=mrbins,weights=meerkat_w/4.,label="MK 2023 data") + plt.legend() + plt.tight_layout() + plt.savefig(plotdir+"lsst_pR_w_hist.png") + plt.close() + + ########## MeerKAT comparisons ########## + ####### magnitude ####### + # does cumulative histogram, and compares to expected + mtcs = np.cumsum(mtmh) + mtcs /= mtcs[-1] + from zdm import optical_numerics as on + cdf = on.make_cdf(Rbars,meerkat_mr,meerkat_w,norm=False) + # normalsie by fraction of FRBs actually studied + + cdf *= 
np.sum(meerkat_w)/(10.*cdf[-1]) # should get about 6 of 10 FRBs + + maxmr = np.max(meerkat_mr) + OK = np.where(Rbars <= 24)[0] + + plt.figure() + plt.xlabel("$m_r$") + plt.ylabel("cdf$(m_r)$") + plt.ylim(0,1) + plt.xlim(15,30) + plt.plot(Rbars,mtcs,label="Prediction",linestyle="--") + plt.plot(Rbars[OK],cdf[OK],label="Observations",linestyle="-") + plt.legend() + plt.tight_layout() + plt.savefig(plotdir+"meerkat_mr_comparison.png") + plt.close() + + ########## redshift ######## + mtcs = np.cumsum(mtpz) + mtcs /= mtcs[-1] + + cdf = on.make_cdf(zvals,meerkat_z,meerkat_w,norm=False) + # normalsie by fraction of FRBs actually studied + + cdf *= np.sum(meerkat_w)/(10.*cdf[-1]) # should get about 6 of 10 FRBs + + maxz = np.max(meerkat_z) + + OK = np.where(zvals <= maxz+0.1)[0] + + plt.figure() + plt.xlabel("$z$") + plt.ylabel("cdf$(z)$") + plt.ylim(0,1) + plt.xlim(0,3) + plt.plot(zvals,mtcs,label="Prediction",linestyle="--") + plt.plot(zvals[OK],cdf[OK],label="Observations",linestyle="-") plt.legend() plt.tight_layout() - plt.savefig(opdir+"lsst_pR.png") + plt.savefig(plotdir+"meerkat_z_comparison.png") plt.close() - DPplot(zvals,[pz,pz1],["all FRBs","LSST"],opdir + "DP_pz.png",color="orange",legend=False) + DPplot(zvals,[pz,pz1],["all FRBs","LSST"],plotdir + "DP_pz.png",color="orange",legend=False) DPplot(zvals,[pz,pz1,pz0],["all FRBs","LSST","Now"],opdir + "DP_pz0.png",color="orange",legend=False) DPplot(zvals,[pz1,pz0],["LSST","Now"],opdir + "DP_lsst_vs_now.png",color="orange",legend=False) - + def DPplot(zvals,yvals,labels,outfile,color="orange",legend=True): fig = plt.figure() @@ -301,13 +387,31 @@ def read_meerkat(): """ returns z and mr data from Pastor-Morales et al https://arxiv.org/pdf/2507.05982 + Detection method provided in private communication (Pastor-Morales) """ - data=np.loadtxt("meerkat_mr.txt",comments='#') - z=data[:,1] - mr = data[:,2] - z = np.abs(z) - return z,mr + data=np.loadtxt("Data/meerkat_mr.txt",comments='#') + z=data[:,2] + mr = data[:,3] 
+ loc = data[:,4] # 1 is coherent beam, 0 incoherent only + z = np.abs(z) # -ve is + w = data[:,5] #PO|x + + # removes incoherent sum data + good = np.where(loc==1)[0] + z=z[good] + loc=loc[good] + mr=mr[good] + w = w[good] + + # removes missing data + good = np.where(z != 9999) + z = z[good] + loc=loc[good] + mr=mr[good] + w=w[good] + + return z,mr,w def plot_R(Rbars,Rrmss,Rzvals,sbar,srms,opdir,Rlim1,Rlim2): # plot of mean and rms from Gaussian assumption @@ -329,13 +433,21 @@ def plot_R(Rbars,Rrmss,Rzvals,sbar,srms,opdir,Rlim1,Rlim2): plt.close() -def plot_efficiencies(gs,ss,opdir,prefixes): +def plot_efficiencies(gs,ss,opdir,prefixes,Test=False,Scat=False): + """ + Generates a plot of efficiencies at the 0th zbin. Or, for all zbins, + if we are doing a test + """ + for i,s in enumerate(ss): plt.figure() g=gs[i] for j,w in enumerate(s.wlist): - plt.plot(g.dmvals,s.efficiencies[j,0,:],label="w="+str(w)[0:5]) # at z=0 + if Scat: + plt.plot(g.dmvals,s.efficiencies[j,0,:],label="w="+str(w)[0:5]) # at z=0 + else: + plt.plot(g.dmvals,s.efficiencies[j,:],label="w="+str(w)[0:5]) plt.xlabel("DM") plt.ylabel("$\\epsilon$") plt.yscale("log") @@ -345,13 +457,16 @@ def plot_efficiencies(gs,ss,opdir,prefixes): plt.savefig(opdir+prefixes[i]+"_efficiencies.png") plt.close() -def get_surveys_grids(names,opdir,repeaters=True,Test=False): +def get_surveys_grids(names,opdir,repeaters=True,Test=False,Scat=False): # approximate best-fit values from recent analysis # load states from Hoffman et al 2025 # use b or d for rep - state = states.load_state("HoffmannEmin25",scat="updated",rep='b') + if Scat: + state = states.load_state("HoffmannHalo25",scat="updated",rep='b') + else: + state = states.load_state("HoffmannHalo25",rep='b') # artificially add repeater data - we can't actually know this, # because we don't have time per field. 
Just using one day for now @@ -383,7 +498,7 @@ def get_surveys_grids(names,opdir,repeaters=True,Test=False): nz=50 dmmax=4000 zmax=4 - survey_dict["WMETHOD"] = 2 + else: ndm=1400 nz=600 @@ -391,8 +506,12 @@ def get_surveys_grids(names,opdir,repeaters=True,Test=False): zmax=6 # uses redshift-dependent scattering. This takes longer # - by a factor of a few! - survey_dict["WMETHOD"] = 3 + #survey_dict["Wmethod"] = 3 + if Scat: + survey_dict["Wmethod"] = 3 + else: + survey_dict["Wmethod"] = 2 ss,gs = loading.surveys_and_grids(survey_names=names,repeaters=repeaters,init_state=state, sdir=sdir,survey_dict=survey_dict,nz=nz,zmax=zmax,ndm=ndm,dmmax=dmmax) return ss,gs diff --git a/papers/pathpriors/compare_posteriors.py b/papers/pathpriors/compare_posteriors.py new file mode 100644 index 00000000..149ac29b --- /dev/null +++ b/papers/pathpriors/compare_posteriors.py @@ -0,0 +1,302 @@ +""" +This file fits the simple (naive) model to CRAFT ICS observations. +It fits a model of absolute galaxy magnitude distributions, +uses zDM to predict redshifts and hence apparent magntidues, +runs PATH using that prior, and tries to get priors to match posteriors. 
+ +It also geenrates host z-mr plots + +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + + + ######### List of all ICS FRBs for which we can run PATH ####### + # hard-coded list of FRBs with PATH data in ice paper + frblist = lists.icslist + + # Initlisation of zDM grid + # Eventually, this should be part of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. + state = states.load_state("HoffmannHalo25",scat=None,rep=None) + #state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + # initialise figure. ax1 is new vs old. 
ax2 is new-old vs old + plt.figure() + ax1=plt.gca() + plt.xlabel("$P(O_i| \\mathbf{x})$ [original; $P(U)=0.1$]") + plt.ylabel("$P(O_i| \\mathbf{x},N_O)$ [this work]") + + plt.figure() + ax2=plt.gca() + plt.xlabel("$P(O_i| \\mathbf{x})$ [original; $P(U)=0.1$]") + plt.ylabel("$\Delta P(O_i| \\mathbf{x},N_O)$") + + + ##### Begins by calculating the original PATH result ##### + # calculates the original PATH result + wrappers = None + NFRB2,AppMags2,AppMagPriors2,ObsMags2,ObsPrior2,ObsPosteriors2,PUprior2,PUobs2,sumPUprior2,sumPUobs2,frbs,dms = \ + on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False,usemodel=False,P_U=0.1) + fObsPosteriors2 = np.array(on.flatten(ObsPosteriors2)) + + + with open("posteriors/orig.txt",'w') as f: + for i,frb in enumerate(frbs): + f.write(str(i)+" "+frb+" "+str(dms[i])[0:5]+" "+str(PUprior2[i])[0:4]+"\n") + for j,om in enumerate(ObsMags2[i]): + f.write(str(om)[0:5]+" "+ "%e" % ObsPrior2[i][j]+" "+"%e" % ObsPosteriors2[i][j]+"\n") + f.write("\n") + + + #### creates some lists to later pass to make_cumulative_plots #### + NFRBlist = [] + AppMagslist = [] + AppMagPriorslist = [] + ObsMagslist = [] + ObsPosteriorslist = [] + PUpriorlist = [] + PUobslist = [] + sumPUpriorlist = [] + sumPUobslist = [] + + + ####### Model 1: Marnoch ######## + + # model 1: Marnoch + model = opt.marnoch_model() + + wrappers = on.make_wrappers(model,gs) + NFRB1,AppMags1,AppMagPriors1,ObsMags1,ObsPrior1,ObsPosteriors1,PUprior1,PUobs1,sumPUprior1,sumPUobs1,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + fObsPosteriors1 = np.array(on.flatten(ObsPosteriors1)) + plt.sca(ax1) + plt.scatter(fObsPosteriors2,fObsPosteriors1,label="Marnoch23",marker='s') + plt.sca(ax2) + plt.scatter(fObsPosteriors2,fObsPosteriors1-fObsPosteriors2,label="Marnoch23",marker='s') + + + with open("posteriors/marnoch.txt",'w') as f: + for i,frb in enumerate(frbs): + f.write(str(i)+" "+frb+" "+str(dms[i])[0:5]+" "+str(PUprior1[i])[0:4]+"\n") + for j,om in 
enumerate(ObsMags1[i]): + f.write(str(om)[0:5]+" "+ "%e" % ObsPrior1[i][j]+" "+"%e" % ObsPosteriors1[i][j]+"\n") + f.write("\n") + + ####### Model 2: Loudas ######## + + model = opt.loudas_model() + xbest = np.load("loudas_output/best_fit_params.npy") + model.init_args(xbest) # best-fit arguments + wrappers = on.make_wrappers(model,gs) + NFRB3,AppMags3,AppMagPriors3,ObsMags3,ObsPrior3,ObsPosteriors3,PUprior3,PUobs3,sumPUprior3,sumPUobs3,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + fObsPosteriors3 = np.array(on.flatten(ObsPosteriors3)) + plt.sca(ax1) + plt.scatter(fObsPosteriors2,fObsPosteriors3,label="Loudas25",marker='x') + plt.sca(ax2) + plt.scatter(fObsPosteriors2,fObsPosteriors3-fObsPosteriors2,label="Loudas25",marker='x') + + with open("posteriors/loudas.txt",'w') as f: + for i,frb in enumerate(frbs): + f.write(str(i)+" "+frb+" "+str(dms[i])[0:5]+" "+str(PUprior3[i])[0:4]+"\n") + for j,om in enumerate(ObsMags3[i]): + f.write(str(om)[0:5]+" "+ "%e" % ObsPrior3[i][j]+" "+"%e" % ObsPosteriors3[i][j]+"\n") + f.write("\n") + + + + ####### Model 3: Simple ######## + + # Case of simple host model + opstate = op.OpticalState() + # sets optical state to use simple linear interpolation + opstate.simple.AbsModelID = 1 # linear interpolation + opstate.simple.AppModelID = 1 # include k-correction + opstate.simple.NModelBins = 6 + opstate.simple.Absmin = -25 + opstate.simple.Absmax = -15 + model = opt.simple_host_model(opstate) + + # retrieve default initial arguments in vector form + xbest = np.load("simple_output/best_fit_params.npy") + #x = [-2.28795519, 0., 0. , 0. , 0.11907231,0.84640048,0.99813815 , 0., 0. , 0. , 0. 
] + + # initialises best-fit arguments + model.init_args(xbest) + + ############# Generate a KS-like plot showing the best fits #################### + wrappers = on.make_wrappers(model,gs) + NFRB4,AppMags4,AppMagPriors4,ObsMags4,ObsPrior4,ObsPosteriors4,PUprior4,PUobs4,sumPUprior4,sumPUobs4,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + fObsPosteriors4 = np.array(on.flatten(ObsPosteriors4)) + plt.sca(ax1) + plt.scatter(fObsPosteriors2,fObsPosteriors4,label="Naive",marker='o',s=20) + plt.sca(ax2) + plt.scatter(fObsPosteriors2,fObsPosteriors4-fObsPosteriors2,label="Naive",marker='o',s=20) + + # format and save ax1 + plt.sca(ax1) + plt.legend(loc="lower right") + plt.xlim(0,1) + plt.ylim(0,1) + plt.plot([0,1],[0,1],color="black",linestyle=":") + plt.tight_layout() + plt.savefig("posteriors/pox_comparison.png") + plt.close() + + # format and save ax2 + plt.sca(ax2) + plt.legend(loc="upper left") + plt.xlim(0,1) + plt.ylim(-0.2,0.2) + plt.plot([0,1],[0,0],color="black",linestyle=":") + plt.text(0.4,0.01,"no change") + plt.plot([0.8,1],[0.2,0],color="black",linestyle=":") + plt.text(0.85,0.08,"$P(O_i| \\mathbf{x})=1$",rotation=-60) + + plt.plot([0,0.2],[0,-0.2],color="black",linestyle=":") + plt.text(0.03,-0.18,"$P(O_i| \\mathbf{x})=0$",rotation=-60) + + plt.tight_layout() + plt.savefig("posteriors/delta_pox_comparison.png") + plt.close() + + with open("posteriors/naive.txt",'w') as f: + for i,frb in enumerate(frbs): + f.write(str(i)+" "+frb+" "+str(dms[i])[0:5]+" "+str(PUprior4[i])[0:4]+"\n") + for j,om in enumerate(ObsMags4[i]): + f.write(str(om)[0:5]+" "+ "%e" % ObsPrior4[i][j]+" "+"%e" % ObsPosteriors4[i][j]+"\n") + f.write("\n") + + all_candidates = on.get_cand_properties(frblist) + + # now iterates through galaxies and writes relevant info + for i in np.arange(NFRB1): + string1="\multicolumn{5}{c|}{"+frbs[i]+"} & " + string1 += f"{PUprior2[i]:.3f} & {PUobs2[i]:.3f} & " + string1 += f"{PUprior1[i]:.3f} & {PUobs1[i]:.3f} & " + string1 += 
f"{PUprior3[i]:.3f} & {PUobs3[i]:.3f} & " + string1 += f"{PUprior4[i]:.3f} & {PUobs4[i]:.3f} \\\\ " + print("\\hline") + print(string1) + print("\\hline") + + + + for j,mag in enumerate(ObsMags4[i]): + # check if we print this one at all + if ObsPosteriors1[i][j] < 1e-4 and ObsPosteriors2[i][j] < 1e-4 \ + and ObsPosteriors3[i][j] < 1e-4 and ObsPosteriors4[i][j] < 1e-4: + + continue + + string2 = f"{all_candidates[i]['ra'][j]:.4f} & {all_candidates[i]['dec'][j]:.4f} &" + string2 += f" {all_candidates[i]['separation'][j]:.2f} &" + string2 += f" {all_candidates[i]['ang_size'][j]:.2f} & {all_candidates[i]['mag'][j]:.2f} &" + + + string2 += f"{ObsPrior2[i][j]:.3f} & {ObsPosteriors2[i][j]:.3f} & " + string2 += f"{ObsPrior1[i][j]:.3f} & {ObsPosteriors1[i][j]:.3f} & " + string2 += f"{ObsPrior3[i][j]:.3f} & {ObsPosteriors3[i][j]:.3f} & " + string2 += f"{ObsPrior4[i][j]:.3f} & {ObsPosteriors4[i][j]:.3f} \\\\ " + print(string2) + + + + ######## Makes cumulative distribution KS-style plots + + # loads various marnoch models + model = opt.loudas_model() + xbest = np.load("loudas_output/best_fit_params.npy") + for f_sfr in [0,1,xbest]: + x=[f_sfr] + model.init_args(x) + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + NFRBlist.append(NFRB) + AppMagslist.append(AppMags) + AppMagPriorslist.append(AppMagPriors) + ObsMagslist.append(ObsMags) + ObsPosteriorslist.append(ObsPosteriors) + PUpriorlist.append(PUprior) + PUobslist.append(PUobs) + sumPUpriorlist.append(sumPUprior) + sumPUobslist.append(sumPUobs) + + # loads naive model + NFRBlist.append(NFRB4) + AppMagslist.append(AppMags4) + AppMagPriorslist.append(AppMagPriors4) + ObsMagslist.append(ObsMags4) + ObsPosteriorslist.append(ObsPosteriors4) + PUpriorlist.append(PUprior4) + PUobslist.append(PUobs4) + sumPUpriorlist.append(sumPUprior4) + sumPUobslist.append(sumPUobs4) + + + # 
loads Marnoch model + NFRBlist.append(NFRB1) + AppMagslist.append(AppMags1) + AppMagPriorslist.append(AppMagPriors1) + ObsMagslist.append(ObsMags1) + ObsPosteriorslist.append(ObsPosteriors1) + PUpriorlist.append(PUprior1) + PUobslist.append(PUobs1) + sumPUpriorlist.append(sumPUprior1) + sumPUobslist.append(sumPUobs1) + + + plotlabels=["Loudas25: $f_{\\rm sfr} = 0$", " $f_{\\rm sfr} = 1$", + " $f_{\\rm sfr} = 3$","Naive","Marnoch23"] + plotfile="Plots/all_cumulative.png" + NMODELS=5 + on.make_cumulative_plots(NMODELS,NFRBlist,AppMagslist,AppMagPriorslist,ObsMagslist,ObsPosteriorslist, + PUobslist,PUpriorlist,plotfile,plotlabels,POxcut=None,onlyobs=0,addpriorlabel=False) + +main() diff --git a/papers/pathpriors/fit_loudas_model.py b/papers/pathpriors/fit_loudas_model.py new file mode 100644 index 00000000..79a9a30f --- /dev/null +++ b/papers/pathpriors/fit_loudas_model.py @@ -0,0 +1,206 @@ +""" +This file fits and generates plots for the model of Loudas et al +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize +from scipy.stats import chi2 + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + + ######### List of all ICS FRBs for which we can run PATH ####### + # hard-coded list of FRBs with PATH data in ice paper + frblist = lists.icslist + + # Initlisation of zDM grid + # Eventually, this should be part 
of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. + state = states.load_state("HoffmannHalo25",scat=None,rep=None) + #state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + ######## Determnine which statistic to use in optimisation ######## + # setting istat=0 means using a ks statistic to fit p(m_r) + # setting istat=1 means using a maximum likelihood estimator + istat=1 + # determines which model to use + modelname = "loudas" + + opdir = modelname+"_output/" + POxcut = None # set to e.g. 0.9 to reject FRBs with lower posteriors when doing model comparisons + + if not os.path.exists(opdir): + os.mkdir(opdir) + + model = opt.loudas_model() + # set f_sfr starting value to 0.5 + x0 = [0.5] + bounds=[(-3,3)] # large range - physical region is 0 to 1 + + # initialise aguments to minimisation function + args=[frblist,ss,gs,model,POxcut,istat] + + # turn minimise on to re-perform the minimusation. 
But it's already been done + minimise=True + if minimise: + result = minimize(on.function,x0 = x0,args=args,bounds = bounds) + + print("Best fit result is f_sfr = ",result.x) + x = result.x + # saves result + np.save(opdir+"/best_fit_params.npy",x) + else: + # replace later + x=[3] + print("using previous result of f_sfr = ",x) + + # initialises arguments + model.init_args(x) + bestx=x[0] + xstring = f"{bestx:.1f}" + + outfile = opdir+"loudas_best_fit_apparent_magnitudes.png" + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + llstat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=outfile) + llbest = llstat + ksstat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) + + print("Best-fit stats of the Loudas model are ll=",llstat," ks = ",ksstat) + + # makes a ks stat plot comparing three scenarios - SFR, M*, and best fit + + NFRBlist = [] + AppMagslist = [] + AppMagPriorslist = [] + ObsMagslist = [] + ObsPosteriorslist = [] + PUpriorlist = [] + PUobslist = [] + sumPUpriorlist = [] + sumPUobslist = [] + + for f_sfr in [bestx,0,1]: + x=[f_sfr] + model.init_args(x) + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + NFRBlist.append(NFRB) + AppMagslist.append(AppMags) + AppMagPriorslist.append(AppMagPriors) + ObsMagslist.append(ObsMags) + ObsPosteriorslist.append(ObsPosteriors) + PUpriorlist.append(PUprior) + PUobslist.append(PUobs) + sumPUpriorlist.append(sumPUprior) + sumPUobslist.append(sumPUobs) + + NMODELS = 3 + + plotlabels=["$f_{\\rm sfr} = "+xstring+"$", "$f_{\\rm sfr} = 0$", "$f_{\\rm sfr} = 1$"] + plotfile = 
opdir+"loudas_f0_1_best_comparison.png" + on.make_cumulative_plots(NMODELS,NFRBlist,AppMagslist,AppMagPriorslist,ObsMagslist,ObsPosteriorslist, + PUobslist,PUpriorlist,plotfile,plotlabels,POxcut=None,onlyobs=0,abc="(b)") + + + NSFR=31 + stats = np.zeros([NSFR]) + SFRs = np.linspace(0,3,NSFR) + pvalues = np.zeros([NSFR]) + dlls = np.zeros([NSFR]) + + for istat,sfr in enumerate(SFRs): + if not minimise: + break + model.init_args([sfr]) + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,\ + PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + stat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs, + PUprior,plotfile=outfile,POxcut=POxcut) + stats[istat] = stat + dll = 2.*(llbest-stat) * np.log(10) # stat returned in log base 10, needs to be natural log + p_wilks = 1.-chi2.cdf(dll,1) + pvalues[istat] = p_wilks + dlls[istat] = dll + + if minimise: + # save data if doing this for the first time + np.save(opdir+"/llk.npy",stats) + np.save(opdir+"/pvalues.npy",pvalues) + np.save(opdir+"/dlls.npy",dlls) + else: + # else, load it + stats = np.load(opdir+"/llk.npy") + pvalues = np.load(opdir+"/pvalues.npy") + dlls = np.load(opdir+"/dlls.npy") + + # print values + for i,f in enumerate(SFRs): + print("p-value of ",f," is ",pvalues[i]) + + + outfile = opdir+"scan_sfr.png" + + plt.figure() + l1,=plt.plot(SFRs,stats,marker="o") + plt.xlabel("$f_{\\rm sfr}$") + plt.ylabel("$\\log_{10} \\mathcal{L}(f_{\\rm sfr})$") + plt.xlim(0,3) + plt.ylim(40,48) + + ax2 = plt.gca().twinx() + l2,=ax2.plot(SFRs,pvalues,color="black",linestyle=":",label="p-value") + plt.yscale('log') + plt.ylabel('p-value') + plt.ylim(1e-3,1.) 
+ + plt.legend(handles=[l1,l2],labels=["$\\log_{10} \\mathcal{L} (f_{\\rm sfr})$","p-value"],loc="lower right") + + plt.tight_layout() + plt.savefig(outfile) + plt.close() + + + +main() diff --git a/papers/pathpriors/fit_simple_model.py b/papers/pathpriors/fit_simple_model.py new file mode 100644 index 00000000..953b439d --- /dev/null +++ b/papers/pathpriors/fit_simple_model.py @@ -0,0 +1,249 @@ +""" +This file fits the simple (naive) model to CRAFT ICDS observations. +It fits a model of absolute galaxy magnitude distributions, +uses zDM to predict redshifts and hence apparent magntidues, +runs PATH using that prior, and tries to get priors to match posteriors. + +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize +from scipy.stats import chi2 + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + + ######### List of all ICS FRBs for which we can run PATH ####### + # hard-coded list of FRBs with PATH data in ice paper + frblist = lists.icslist + + # Initlisation of zDM grid + # Eventually, this should be part of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. 
+ state = states.load_state("HoffmannHalo25",scat=None,rep=None) + + # initialise cosmology + cos.set_cosmology(state) + cos.init_dist_measures() + + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + ######## Determnine which statistic to use in optimisation ######## + # setting istat=0 means using a ks statistic to fit p(m_r) + # setting istat=1 means using a maximum likelihood estimator + istat=1 + # dok=True means use the k-correction + dok = True + # we are using the simple model + modelname = "simple" + # set to e.g. 0.9 to reject FRBs with lower posteriors when doing model comparisons + POxcut = None + + opdir = "simple_output/" + + + if not os.path.exists(opdir): + os.mkdir(opdir) + + # Case of simple host model + # Initialisation of model + # simple host model + opstate = op.OpticalState() + # sets optical state to use simple linear interpolation + opstate.simple.AbsModelID = 1 + opstate.simple.NModelBins = 6 + opstate.simple.Absmin = -25 + opstate.simple.Absmax = -15 + + # sets up initial bounds on variables + if dok: + opstate.simple.AppModelID = 1 # k-correction + Nparams = opstate.simple.NModelBins+1 + opstate.simple.AppModelID = 1 # sets to include k-correction + opstate.simple.k = 0.5 # for some reason, this just doesn't make much difference to results + bounds = [(-25,25)]+[(0,1)]*(Nparams-1) + else: + Nparams = opstate.simple.NModelBins + # bins now give log-space values, hence -5,2 is range of 10^7 + if opstate.simple.AbsModelID == 3: + base=(-5,2) # log space + else: + base=(0,1) # linear space + bounds = [base]*(Nparams) + opstate.simple.AppModelID = 0 # no k-correction + + model = opt.simple_host_model(opstate) + + # retrieve default initial arguments in vector form + x0 = model.get_args() + + # initialise aguments to minimisation function + args=[frblist,ss,gs,model,POxcut,istat] + + # set to false to just use hard-coded best fit parameters + minimise=True + 
if minimise: + result = minimize(on.function,x0 = x0,args=args,bounds = bounds) + print("Best fit result is ",result.x) + x = result.x + # saves result + np.save(opdir+"/best_fit_params.npy",x) + else: + # hard-coded best fit parameters from running optimisation + x = np.load(opdir+"best_fit_params.npy") + + # initialises best-fit arguments + model.init_args(x) + + ############# Generate a KS-like plot showing the best fits #################### + outfile = opdir+"simple_best_fit_apparent_magnitudes.png" + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + llstat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=outfile) + ksstat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs, + sumPUprior,plotfile=outfile,abc="(c)",tag="Naive: ",) + + print("Best-fit stats of the naive model are ll=",llstat," ks = ",ksstat) + + # we determine the range of k which are compatible at 1,2,3 sigma using Wilks' theorem + # this states that 2*log(L(k)-L(k=0)) should be distributed according to a chi2 distribution + # with one degree of freedom + + ############ k-correction figure ############3 + # we generate a plot showing the convergence on k, i.e. 
how/why we get a best fit + llbest = llstat + nk=101 + kvals = np.linspace(-5,5,nk) + stats = np.zeros([nk]) + pvalues = np.zeros([nk]) + dlls = np.zeros([nk]) + for i,kcorr in enumerate(kvals): + if not minimise: + break + x[0] = kcorr + model.init_args(x) + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + stat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior) + stats[i] = stat + dll = 2.*(llbest-stat) * np.log(10) # stat returned in log base 10, needs to be natural log + p_wilks = 1.-chi2.cdf(dll,1) + pvalues[i] = p_wilks + dlls[i] = dll + + if minimise: + # save data if doing this for the first time + np.save(opdir+"/llk.npy",stats) + np.save(opdir+"/pvalues.npy",pvalues) + np.save(opdir+"/kvals.npy",kvals) + np.save(opdir+"/dlls.npy",dlls) + else: + # else, load it + stats = np.load(opdir+"/llk.npy") + pvalues = np.load(opdir+"/pvalues.npy") + kvals = np.load(opdir+"/kvals.npy") + dlls = np.load(opdir+"/dlls.npy") + + for i,k in enumerate(kvals): + print("p-value of ",k," is ",pvalues[i]) + + plt.figure() + l1,=plt.plot(kvals,stats,label="$\\log_{10} \\mathcal{L} (k)$") + #plt.yscale('log') + plt.xlabel('$k$') + plt.ylabel('$\\log_{10} \\mathcal{L} (k)$') + #plt.legend() + + ax2 = plt.gca().twinx() + l2,=ax2.plot(kvals,pvalues,color="black",linestyle=":",label="p-value") + plt.yscale('log') + plt.ylabel('p-value') + plt.ylim(1e-3,1.) 
+ plt.legend(handles=[l1,l2],labels=["$\\log_{10} \\mathcal{L} (k)$","p-value"],loc="lower right") + + plt.tight_layout() + plt.savefig(opdir+'/pkvalue.png') + plt.close() + + + # calculates the original PATH result + outfile = opdir+"original_fit_apparent_magnitudes.png" + NFRB2,AppMags2,AppMagPriors2,ObsMags2,ObsPriors2,ObsPosteriors2,PUprior2,PUobs2,sumPUprior2,sumPUobs2,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False,usemodel=False) + + # flattens lists of lists + ObsPosteriors = [x for xs in ObsPosteriors for x in xs] + ObsPosteriors2 = [x for xs in ObsPosteriors2 for x in xs] + + # plots original vs updated posteriors + plt.figure() + plt.xlabel("Original P") + plt.ylabel("Best fit P") + plt.xlim(0,1) + plt.ylim(0,1) + plt.scatter(ObsPosteriors2,ObsPosteriors,label="Hosts",marker='x') + plt.scatter(PUobs2,PUobs,label="Unobserved",marker='+') + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"Scatter_plot_comparison.png") + plt.close() + + ####### Plots that only make sense for specific models ########3 + + plt.figure() + plt.xlabel("Absolute magnitude, $M_r$") + plt.ylabel("$p(M_r)$") + plt.plot(model.AbsMags,model.AbsMagWeights/np.max(model.AbsMagWeights),label="interpolation") + + if dok: + toplot = x[1:] + else: + toplot = x + + if model.AbsModelID == 3: + toplot = 10**toplot + plt.plot(model.ModelBins,toplot/np.max(toplot),marker="o",linestyle="",label="Model Parameters") + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"best_fit_absolute_magnitudes.png") + plt.close() + + +main() + diff --git a/papers/pathpriors/get_pu_dist.py b/papers/pathpriors/get_pu_dist.py new file mode 100644 index 00000000..52818871 --- /dev/null +++ b/papers/pathpriors/get_pu_dist.py @@ -0,0 +1,167 @@ +""" +This file fits the simple (naive) model to CRAFT ICS observations. 
+It fits a model of absolute galaxy magnitude distributions, +uses zDM to predict redshifts and hence apparent magntidues, +runs PATH using that prior, and tries to get priors to match posteriors. + +It also geenrates host z-mr plots + +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states + +from importlib import resources + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + ########## Sets optical parameters ######### + + opdir="Plots/" + # Case of simple host model + opstate = op.OpticalState() + + # sets optical state to use simple model + opstate.simple.AbsModelID = 1 # linear interpolation + opstate.simple.AppModelID = 1 # include k-correction + opstate.simple.NModelBins = 6 + opstate.simple.Absmin = -25 + opstate.simple.Absmax = -15 + + # sets the parameters of the P(O|m) function + TELs=["Pan-STARRS","Legacy Surveys","VLT/FORS2"] + TelMeans = [21.8,24.0,26.4] + TelSigmas = [0.54,0.55,0.28] + + + # Initlisation of zDM grid + # Eventually, this should be part of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. 
+ state = states.load_state("HoffmannHalo25",scat=None,rep=None) + #state = parameters.State() + + labels=['(a) ASKAP/ICS 900 MHz','(b) CHIME ($\delta=60^{\circ}$)','(c) MeerKAT coherent','(d) DSA'] + tags=['ASKAP','CHIME','MeerKAT','DSA'] + names=['CRAFT_ICS_892','CHIME/CHIME_decbin_3_of_6','MeerTRAPcoherent','DSA']#,'CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + # initialise figure + styles=[":","--","-"] + NDM=20 + DMlist = np.linspace(50,1950,NDM) + + for j,g in enumerate(gs): + plt.figure() + plt.xlabel("$\\rm DM_{\\rm EG}$ [pc cm$^{-3}$]") + plt.ylabel("$P(U|{\\rm DM_{\\rm EG}})$") + tag = tags[j] + label = labels[j] + plt.text(-250,1.04,label) + for i in np.arange(3): + + opstate.id.pU_mean=TelMeans[i] + opstate.id.pU_width=TelSigmas[i] + + PUs = get_PUs(opstate,g,DMlist) + + if i==0: + l1,=plt.plot(DMlist,PUs[:,0],linestyle=styles[i]) + l2,=plt.plot(DMlist,PUs[:,1],linestyle=styles[i]) + l3,=plt.plot(DMlist,PUs[:,2],linestyle=styles[i]) + elif i==2: + plt.plot(DMlist,PUs[:,0],label="Marnoch23",linestyle=styles[i],color=l1.get_color()) + plt.plot(DMlist,PUs[:,1],label="Loudas25",linestyle=styles[i],color=l2.get_color()) + plt.plot(DMlist,PUs[:,2],label="Naive",linestyle=styles[i],color=l3.get_color()) + else: + plt.plot(DMlist,PUs[:,0],linestyle=styles[i],color=l1.get_color()) + plt.plot(DMlist,PUs[:,1],linestyle=styles[i],color=l2.get_color()) + plt.plot(DMlist,PUs[:,2],linestyle=styles[i],color=l3.get_color()) + # plot kind of optical observation + for i in np.arange(3): + Tlabel=TELs[i] + plt.plot([--100,-50],[-10,-10],color="black",label=Tlabel,linestyle=styles[i]) + + plt.ylim(0,1) + plt.xlim(0,2000) + if j==0: + plt.legend(fontsize=12) + plt.tight_layout() + plt.savefig(opdir+tag+"pu_all.png") + plt.close() + + +def get_PUs(opstate,g,DMlist): + """ + Returns P(U) array for three models + """ + ######### Initialise models ########### + + # model 1: Marnoch + model1 = 
opt.marnoch_model(opstate) + + # Loudas + model2 = opt.loudas_model(opstate) + xbest = np.load("loudas_output/best_fit_params.npy") + model2.init_args(xbest) + + + model3 = opt.simple_host_model(opstate) + + # retrieve default initial arguments in vector form + xbest = np.load("simple_output/best_fit_params.npy") + model3.init_args(xbest) + + + wrapper1 = opt.model_wrapper(model1,g.zvals) + wrapper2 = opt.model_wrapper(model2,g.zvals) + wrapper3 = opt.model_wrapper(model3,g.zvals) + wrappers = [wrapper1,wrapper2,wrapper3] + + # iterates of DM list + NM=len(wrappers) + NDM = len(DMlist) + PUs = np.zeros([NDM,NM]) + for i,DMEG in enumerate(DMlist): + for j,wrapper in enumerate(wrappers): + wrapper.init_path_raw_prior_Oi(DMEG,g) + PU = wrapper.estimate_unseen_prior() + PUs[i,j]=PU + #print(DMEG,PU) + + return PUs + +main() diff --git a/papers/pathpriors/pO_g_mr/fit_pogmr.py b/papers/pathpriors/pO_g_mr/fit_pogmr.py new file mode 100644 index 00000000..351ae1c1 --- /dev/null +++ b/papers/pathpriors/pO_g_mr/fit_pogmr.py @@ -0,0 +1,86 @@ +""" + +This script fits data to the p(U|mr) data, i.e. the probability that +a galaxy is unseen given it has an intrinsic magnitude of m_r. 
+ +The functional form of the fits is given by opt.pUgm + +The data is provided by Michelle Woodland (for CRAFT) +and Bridget Anderson (data to be published) + + +""" + + + +from zdm import optical as opt +from matplotlib import pyplot as plt +import numpy as np +import pandas as pd +from scipy.optimize import curve_fit + + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + +# relevant for CRAFT, from Bridget +df = pd.read_csv("pu_mr_vs_mag_CRAFT_VLT_FORS2_r.csv") +result = curve_fit(opt.pUgm,df['mag'],df['PU_mr'],p0=[26.5,0.3]) +CRAFT_result = result[0] +CRAFT_pogm = opt.pUgm(df['mag'],result[0][0],result[0][1]) + +# Legacy surveys - from Bridget Anderson +Lmags = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, + 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0] +LpU_mr = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.027122579985493736, + 0.02436363700867187, 0.014231286256411943, 0.047649506708623335, + 0.15510269056554593, 0.4759090774425562, 0.8798642289140987, 1.0, + 0.980904733057884] +result = curve_fit(opt.pUgm,Lmags,LpU_mr,p0=[26.5,0.3]) +Legacy_result = result[0] +Legacy_fit = opt.pUgm(Lmags,result[0][0],result[0][1]) + +#Pan-STARRS: from Bridget Anderson +PSmags = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, + 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0] +PSpU_mr = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.05338978533359865, + 0.0753794861936344, 0.17783737932531407, 0.6123903316329317, + 0.9170697731444298, 0.9736154053312236, 1.0, 0.9704756326566667, + 0.9993072645593615] +result = curve_fit(opt.pUgm,PSmags,PSpU_mr,p0=[26.5,0.3]) +PS_result = result[0] +PS_fit = opt.pUgm(PSmags,result[0][0],result[0][1]) + +print("Fit results are...") +print(" PanSTARSS: ",PS_result) +print(" Legacy ",Legacy_result) +print(" VLT/FORS2 ",CRAFT_result) + +LpU_mr = np.array(LpU_mr) +PSpU_mr = np.array(PSpU_mr) + +plt.figure() + 
+plt.plot(df['mag'],1.-df['PU_mr'],label="VLT/FORS2") +plt.plot(df['mag'],1.-CRAFT_pogm,label=" (fit)",linestyle="--",color = plt.gca().lines[-1].get_color()) +plt.plot(Lmags,1.-LpU_mr,label="Legacy surveys") +plt.plot(Lmags,1.-Legacy_fit,label=" (fit)",linestyle="--",color = plt.gca().lines[-1].get_color()) +plt.plot(PSmags,1.-PSpU_mr,label="Pan-STARRS") +plt.plot(PSmags,1.-PS_fit,label=" (fit)",linestyle="--",color = plt.gca().lines[-1].get_color()) +plt.legend() +plt.xlim(15,30) +plt.ylim(0,1) +plt.xlabel("$m_r$") +plt.ylabel("$P(O|m_r)$") +plt.tight_layout() +plt.savefig("pOgm.png") +plt.close() + diff --git a/papers/pathpriors/pO_g_mr/pu_mr_vs_mag_CRAFT_VLT_FORS2_r.csv b/papers/pathpriors/pO_g_mr/pu_mr_vs_mag_CRAFT_VLT_FORS2_r.csv new file mode 100644 index 00000000..3c354326 --- /dev/null +++ b/papers/pathpriors/pO_g_mr/pu_mr_vs_mag_CRAFT_VLT_FORS2_r.csv @@ -0,0 +1,28 @@ +mag,PU_mr +16.25,0.0 +16.75,0.0 +17.25,0.0 +17.75,0.0 +18.25,0.0 +18.75,0.0 +19.25,0.0 +19.75,0.0 +20.25,0.0 +20.75,0.0 +21.25,0.0 +21.75,0.0 +22.25,0.0 +22.75,0.0 +23.25,0.0 +23.75,0.0 +24.25,0.0 +24.75,0.0 +25.25,0.048275021085262715 +25.75,0.24650222799336496 +26.25,0.5408575679924887 +26.75,0.8386567720731101 +27.25,0.9805334874938698 +27.75,0.9998839106072703 +28.25,1.0 +28.75,1.0 +29.25,1.0 diff --git a/papers/pathpriors/plot_colors.py b/papers/pathpriors/plot_colors.py new file mode 100644 index 00000000..5cea5784 --- /dev/null +++ b/papers/pathpriors/plot_colors.py @@ -0,0 +1,70 @@ + +import numpy as np +from matplotlib import pyplot as plt +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + + +# hard-coded color-color data + +# array of g and I bands +gI = [ [21.23,19.875], + [22.24,21.17], + [22.59,21.10], + [18.167,17.097], + [24.02,22.41], + [23.87,22.4], + [21.037,19.618], + [19.103,17.743], + [23.3,21.90], + [21.856,20.61], + [20.910,19.564], + [23.86,22.68], + 
[20.476,19.194], + [18.128,16.476], + [15.819,14.860], + [17.184,16.212], + [21.49,20.47], + [18.529,17.232]] + +# hard-coded g minus R colours +gR = [[24.02,23.03],[20.842,20.258],[24.22,23.72],[18.529,17.843]] + +RI = [[23.03,22.41],[17.843,17.232]] + +# convert to numpy +gI = np.array(gI) +gR = np.array(gR) +RI = np.array(RI) + + +plt.xlabel("$g-R$") +bins = np.linspace(0,2,21) + + + +print("Mean g minus I is ",np.mean(gI[:,0]-gI[:,1]),gI[:,1].size) +print("Mean R minus I is ",np.mean(RI[:,0]-RI[:,1]),RI[:,1].size) +print("Mean g minus R is ",np.mean(gR[:,0]-gR[:,1]),gR[:,1].size) +plt.figure() +plt.xlim(0.8,1.8) +plt.yticks(np.linspace(0,4,5)) +plt.hist(gI[:,0]-gI[:,1],bins=bins,label="$m_g-m_I$",alpha=0.5) +plt.hist(2.*(gR[:,0]-gR[:,1]),bins=bins,label="$2(m_g-m_R)$",alpha=0.5) +plt.hist(2.*(RI[:,0]-RI[:,1]),bins=bins,label="$2(m_R-m_I)$",alpha=0.5) +plt.legend(loc = "upper left") +plt.xlabel("colour") +plt.ylabel("counts") +plt.tight_layout() +plt.savefig("color_correction.png") +plt.close() + + diff --git a/papers/pathpriors/plot_craft_optical_data.py b/papers/pathpriors/plot_craft_optical_data.py new file mode 100644 index 00000000..dc21e0d3 --- /dev/null +++ b/papers/pathpriors/plot_craft_optical_data.py @@ -0,0 +1,161 @@ +""" +This file generates plots of the CRAFT host galaxy candidates. 
+ +Output is placed in Figures +""" + + +#standard Python imports +import os +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt +from importlib import resources +from scipy.integrate import quad + +# imports from the "FRB" series +from zdm import frb_lists as lists +from zdm import optical as opt +from frb.frb import FRB +from astropath import chance + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + + # gets this + frblist = lists.icslist + + maglist=[] + anglist=[] + + + for i,frb in enumerate(frblist): + my_frb = FRB.by_name(frb) + # reads in galaxy info + ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH') + pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv') + ptbl = pd.read_csv(pfile) + candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']] + + maglist = maglist + list(candidates['mag'].values) + anglist = anglist + list(candidates['ang_size'].values) + + + ngs = len(maglist) + weights = np.full([ngs],1.) + + # gets most likely ics hosts + z,mr,w = read_craft() + weights = np.append(weights,-w) + tempmaglist = np.append(maglist,mr) + + + + # plots histograms + NM=21 + mags = np.linspace(10,30,NM) + mids = (mags[1:] + mags[:-1])/2. + #pmags = np.zeros([NM-1]) + #for i,mag in enumerate(mids): + # pmags[i] = chance.driver_sigma(mag) + pmags = int_driver(mags) + + plt.figure() + plt.hist(maglist,label="optical images",bins=mags) + plt.hist(tempmaglist,label="'host' subtracted",weights=weights,bins=mags) + plt.xlabel("Apparent magnitude, $m$") + plt.ylabel("Number of galaxies") + + # arbitrary normalisation + pmags = 2.*pmags*80/pmags[12] + + plt.plot(mids,pmags,label="Driver et al. 
2016",linewidth=3,linestyle="--") + plt.legend() + plt.xlim(14,28) + plt.xticks(np.linspace(14,28,8)) + plt.ylim(0,100) + plt.tight_layout() + plt.savefig("Figures/mag_hist.png") + plt.close() + + # gets the ratio + h,b = np.histogram(maglist,bins=mags) + ratio = h/pmags + + plt.figure() + plt.plot(mids,ratio) + plt.xlabel("Apparent magnitude, $m$") + plt.ylabel("ratio: CRAFT/Driver et al") + plt.yscale("log") + plt.tight_layout() + plt.savefig("Figures/ratio.png") + plt.close() + + plt.figure() + plt.scatter(maglist,anglist) + plt.xlabel("Apparent magnitude, $m$") + plt.ylabel("Angular size, $\\theta$ [']") + plt.tight_layout() + plt.savefig("Figures/ang_mag.png") + plt.close() + + + + # creates a plot to check the normalisation of driver et al + + plt.figure() + + plt.xlim(10,30) + plt.ylim(-2,6) + + mags = np.linspace(10,30,21) + driver = chance.driver_sigma(mags) + plt.plot(mags,np.log10(driver*3600*3600/2),color="black") + plt.tight_layout() + plt.savefig("driver_test.png") + plt.close() + +def int_driver(bins): + """ + Integrates the driver et al formula over the magnitude bins + + Args: + bins: bins in r-band magnitude + """ + + nbins = len(bins) + integral = np.zeros([nbins-1]) + for i in np.arange(nbins-1): + result = quad(chance.driver_sigma,bins[i],bins[i+1]) + integral[i] = result[0] + return integral + +def read_craft(): + """ + CRAFT ICS data + """ + + DF = pd.read_csv("../lsst/Data/craft_ics_hosts.csv") + + z = np.array(DF["z"]) + mr = np.array(DF["mr"]) + nfrb = len(mr) + w = np.full([nfrb],1.) 
# artificial, but all are highy confidence + return z,mr,w + +main() diff --git a/papers/pathpriors/plot_host_models.py b/papers/pathpriors/plot_host_models.py new file mode 100644 index 00000000..6ccbfe00 --- /dev/null +++ b/papers/pathpriors/plot_host_models.py @@ -0,0 +1,139 @@ +""" +Used to generate final fitted P(m|DM) figures + +""" + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import optical_numerics as on +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import frb_lists as lists + +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + +def calc_path_priors(): + """ + Loops over all ICS FRBs + """ + + opdir = "Plots/" + if not os.path.exists(opdir): + os.mkdir(opdir) + ##### performs the following calculations for the below combinations ###### + + ######## initialises optical-independent info ######## + #frblist is a hard-coded list of FRBs for which we have optical PATH data + frblist = lists.icslist + NFRB = len(frblist) + + + state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + + ##### makes a plot of host priors for the simple model ###### + + # simple host model + # Case of simple host model + opstate1 = op.OpticalState() + # sets optical state to use simple linear interpolation + opstate1.simple.AbsModelID = 1 # linear interpolation + opstate1.simple.AppModelID = 1 # include k-correction + opstate1.simple.NModelBins = 6 + opstate1.simple.Absmin = -25 + opstate1.simple.Absmax = -15 + model1 = opt.simple_host_model(opstate1) + # this is from an initial estimate. Currently, no way to enter this into the opstate. To do. 
+ xbest = np.load("simple_output/best_fit_params.npy") + model1.init_args(xbest) + + + model2=opt.loudas_model() + xbest = np.load("loudas_output/best_fit_params.npy") + model2.init_args(xbest) # best-fit arguments + + # set up basic histogram of p(mr) distribution + mrbins = np.linspace(0,40,401) + mrvals=(mrbins[:-1]+mrbins[1:])/2. + dmr = mrbins[1]-mrbins[0] + + model3 = opt.marnoch_model() + + ######### Plots apparent mag distribution for all models as function of z ####### + styles=["-",":","--","-."] + + plt.figure() + flist=[1,3] #normal distributions, and best fit + + for i,z in enumerate([0.1,1.0]): + + # simple model + pmr = model1.get_pmr_gz(mrbins,z) + pmr /= np.sum(pmr) + + if i==0: + plt.plot(mrvals,pmr/dmr,label="Naive",linestyle=styles[0]) + else: + plt.plot(mrvals,pmr/dmr,linestyle=styles[0],\ + color=plt.gca().lines[0].get_color()) + + pmr = model3.get_pmr_gz(mrbins,z) + if i==0: + plt.plot(mrvals,pmr/dmr,label = "Marnoch23",linestyle=styles[3]) + else: + plt.plot(mrvals,pmr/dmr,linestyle=styles[3],color=plt.gca().lines[1].get_color()) + + + # Loudas model dependencies + for j,fsfr in enumerate(flist): + model2.init_args(fsfr) + pmr = model2.get_pmr_gz(mrbins,z) + pmr /= np.sum(pmr) + if i==0: + plt.plot(mrvals,pmr/dmr,label = "Loudas25 ($f_{\\rm sfr}$ = "+str(fsfr)+")", + linestyle=styles[j+1]) + else: + plt.plot(mrvals,pmr/dmr,linestyle=styles[j+1],\ + color=plt.gca().lines[j+2].get_color()) + + + + plt.xlabel("Apparent magnitude $m_r$") + plt.ylabel("$P(m_r|z)$") + plt.text(17.5,0.285,"$z=0.1$") + plt.text(22,0.285,"$z=1.0$") + plt.xlim(12.5,30) + plt.ylim(0,0.4) + plt.legend(loc="upper right",ncol=2) + #plt.legend(loc=[25,0.35]) + + plt.tight_layout() + plt.savefig(opdir+"all_model_apparent_mags.png") + plt.close() + + + +if __name__ == "__main__": + + calc_path_priors() + + + diff --git a/papers/pathpriors/plot_marnoch_model.py b/papers/pathpriors/plot_marnoch_model.py new file mode 100644 index 00000000..d4d1ecb9 --- /dev/null +++ 
b/papers/pathpriors/plot_marnoch_model.py @@ -0,0 +1,87 @@ +""" +This file compares predictions and outputs for the model of Marnoch et al +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def main(): + """ + Main function + Contains outer loop to iterate over parameters + + """ + + ######### List of all ICS FRBs for which we can run PATH ####### + # hard-coded list of FRBs with PATH data in ice paper + frblist = lists.icslist + + # Initlisation of zDM grid + # Eventually, this should be part of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. + state = states.load_state("HoffmannHalo25",scat=None,rep=None) + #state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + ######## Determnine which statistic to use in optimisation ######## + # setting istat=0 means using a ks statistic to fit p(m_r) + # setting istat=1 means using a maximum likelihood estimator + istat=1 + # determines which model to use + modelname = "marnoch" + + opdir = modelname+"_output/" + POxcut = None # set to e.g. 
0.9 to reject FRBs with lower posteriors when doing model comparisons + + if not os.path.exists(opdir): + os.mkdir(opdir) + + model = opt.marnoch_model() + + outfile = opdir+"marnoch_best_fit_apparent_magnitudes.png" + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + llstat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=outfile) + + ksstat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs, + sumPUprior,plotfile=outfile,abc="(a)",tag="Marnoch23: ") + + print("Best-fit stats of the Marnoch model are ll=",llstat," ks = ",ksstat) + + + +main() diff --git a/papers/pathpriors/simple_systematics.py b/papers/pathpriors/simple_systematics.py new file mode 100644 index 00000000..f68e9daf --- /dev/null +++ b/papers/pathpriors/simple_systematics.py @@ -0,0 +1,252 @@ +""" +This file fits the simple (naive) model to CRAFT ICS observations. 
+It varies the simple model parameterisation, to determine systematic effects +""" + + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt +from scipy.optimize import minimize +from scipy.stats import chi2 + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists + +# other FRB library imports +import astropath.priors as pathpriors + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + + + +def main(): + """ + Main routine + Loops over parameterisations, and plots best fits + """ + + ######### List of all ICS FRBs for which we can run PATH ####### + # hard-coded list of FRBs with PATH data in ice paper + frblist = lists.icslist + + # Initlisation of zDM grid + # Eventually, this should be part of the loop, i.e. host IDs should + # be re-fed into FRB surveys. However, it will be difficult to do this + # with very limited redshift estimates. That might require posterior + # estimates of redshift given the observed galaxies. Maybe. + state = states.load_state("HoffmannHalo25",scat=None,rep=None) + + # initialise cosmology + cos.set_cosmology(state) + cos.init_dist_measures() + + names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + + NBinList = [6,11,21] + AbsminList = np.linspace(-26,-25,11) + AbsmaxList = AbsminList + 10. 
# increases max, but not in same way + + plt.figure() + plt.xlabel("Absolute magnitude, $M_r$") + plt.ylabel("$p(M_r)$") + + count = 0 + + opdir = "simple_systematics/" + if not os.path.exists(opdir): + os.mkdir(opdir) + + load = True + + colours = ["grey","orange","blue"] + markers = ['o','x','s'] + xlist = [] + llstats=[] + ksstats=[] + kvals=[] + for i,NModelBins in enumerate(NBinList): + llstats.append([]) + ksstats.append([]) + kvals.append([]) + for j,Absmin in enumerate(AbsminList): + Absmax = AbsmaxList[j] + fname1 = opdir + "bins_"+str(count)+".npy" + fname2 = opdir + "allx_"+str(count)+".npy" + + + print("Doing model with ",Absmin,Absmax,NModelBins) + + opstate = op.OpticalState() + # sets optical state to use simple linear interpolation + opstate.simple.AbsModelID = 1 + opstate.simple.AppModelID = 1 # k-correction + opstate.simple.Absmin = Absmin + opstate.simple.Absmax = Absmax + opstate.simple.NModelBins = NModelBins + + if load: + bins = np.load(fname1) + allx = np.load(fname2) + model = opt.simple_host_model(opstate) + else: + + #AbsMags = np.linspace(Ansmin,Absmax,NAbsBins) + allx,model = get_best_fit(ss,gs,frblist,opstate) + + bins = model.ModelBins + np.save(fname1,bins) + np.save(fname2,allx) + + + model.init_args(allx) + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + llstat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior) + ksstat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs, + sumPUprior) + + llstats[i].append(llstat) + ksstats[i].append(ksstat) + kvals[i].append(allx[0]) + x = allx[1:] + xlist.append(x) + if j==0: + plt.plot(bins,x/np.sum(x)*NModelBins,color=colours[i],label=str(NModelBins)+" bins",marker=markers[i]) + #plt.plot(bins,x/np.sum(x),marker="o",linestyle="",color=colours[i]) + else: + 
plt.plot(bins,x/np.sum(x)*NModelBins,color=colours[i],marker=markers[i]) + #plt.plot(bins,x/np.sum(x),marker="o",linestyle="",color=colours[i]) + + + + count += 1 + + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"model_systematics.png") + plt.close() + + plt.figure() + print("kvals") + for i,NModelBins in enumerate(NBinList): + plt.plot(kvals[i],label="Nbins = "+str(NModelBins),linestyle="",marker='o') + print(i,NModelBins,np.mean(kvals[i]),np.std(kvals[i])) + plt.savefig(opdir+"kvals.png") + plt.close() + + plt.figure() + print("ks stats") + for i,NModelBins in enumerate(NBinList): + plt.plot(ksstats[i],label="Nbins = "+str(NModelBins),linestyle="",marker='o') + print(i,NModelBins,np.mean(ksstats[i]),np.std(ksstats[i])) + plt.savefig(opdir+"ksstats.png") + plt.close() + + plt.figure() + print("llstats") + for i,NModelBins in enumerate(NBinList): + plt.plot(llstats[i],label="Nbins = "+str(NModelBins),linestyle="",marker='o') + print(i,NModelBins,np.mean(llstats[i]),np.std(llstats[i])) + plt.savefig(opdir+"llstats.png") + plt.close() + + for j,Absmin in enumerate(AbsminList): + # three tests : 20 vs 10, 10 vs 5, 20 vs 5 + dl02 = -2. * (llstats[0][j]-llstats[2][j]) + dl01 = -2. * (llstats[0][j]-llstats[1][j]) + dl12 = -2. 
* (llstats[1][j]-llstats[2][j]) + + ddf02 = 15 + ddf01 = 5 + ddf12 = 10 + + p02 = 1.-chi2.cdf(dl02,ddf02) + p12 = 1.-chi2.cdf(dl12,ddf12) + p01 = 1.-chi2.cdf(dl01,ddf01) + print(j, "th offset: p-values for 5-10, 10-20, and 5-20 are ",p01,p12,p02, " with stats ",dl01,dl12,dl02) + +def get_best_fit(ss,gs,frblist,opstate): + """ + Fits simple model parameterised by: + + Args: + ss: list of survey objects + gs: list of grid objects + frblist: list of FRBs to process + opstate: optical state to be used in modelling + + Returns: + Best-fit parameters + + """ + + + ######## Determnine which statistic to use in optimisation ######## + # setting istat=0 means using a ks statistic to fit p(m_r) + # setting istat=1 means using a maximum likelihood estimator + istat=1 + # dok=True means use the k-correction + dok = True + # we are using the simple model + modelname = "simple" + # set to e.g. 0.9 to reject FRBs with lower posteriors when doing model comparisons + POxcut = None + + + # Case of simple host model + # Initialisation of model + # simple host model + + # sets up initial bounds on variables + if dok: + opstate.simple.AppModelID = 1 # k-correction + Nparams = opstate.simple.NModelBins+1 + opstate.simple.AppModelID = 1 # sets to include k-correction + opstate.simple.k = 0.5 # for some reason, this just doesn't make much difference to results + bounds = [(-25,25)]+[(0,1)]*(Nparams-1) + else: + Nparams = opstate.simple.NModelBins + # bins now give log-space values, hence -5,2 is range of 10^7 + if opstate.simple.AbsModelID == 3: + base=(-5,2) # log space + else: + base=(0,1) # linear space + bounds = [base]*(Nparams) + opstate.simple.AppModelID = 0 # no k-correction + + model = opt.simple_host_model(OpticalState = opstate) + + # retrieve default initial arguments in vector form + x0 = model.get_args() + # initialise aguments to minimisation function + args=[frblist,ss,gs,model,POxcut,istat] + + # set to false to just use hard-coded best fit parameters + minimise=True + 
result = minimize(on.function,x0 = x0,args=args,bounds = bounds) + x = result.x + return x,model + +main() + diff --git a/setup.cfg b/setup.cfg index b8270df0..712df842 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,6 +48,7 @@ install_requires = cmasher>=1.9 ne2001 @ git+https://github.com/FRBs/ne2001 frb @ git+https://github.com/FRBs/FRB + astropath @ git+https://github.com/FRBs/astropath [options.extras_require] test = diff --git a/zdm/MCMC.py b/zdm/MCMC.py index 5cf95581..0c3aa0d6 100644 --- a/zdm/MCMC.py +++ b/zdm/MCMC.py @@ -190,7 +190,6 @@ def calc_log_posterior(param_vals, state, params, surveys_sep, Pn=False, pNreps= llsum += ll if ind_surveys: ll_list.append(ll) - #except ValueError as e: # print("Error, setting likelihood to -inf: " + str(e)) # llsum = -np.inf diff --git a/zdm/data/Optical/zvals.npy b/zdm/data/Optical/zvals.npy new file mode 100644 index 00000000..74dcffd0 Binary files /dev/null and b/zdm/data/Optical/zvals.npy differ diff --git a/zdm/data/Surveys/CRAFT_CRACO_900.ecsv b/zdm/data/Surveys/CRAFT_CRACO_900.ecsv index 8f2d14aa..b8ea366f 100644 --- a/zdm/data/Surveys/CRAFT_CRACO_900.ecsv +++ b/zdm/data/Surveys/CRAFT_CRACO_900.ecsv @@ -16,7 +16,7 @@ # - {name: FBAR, datatype: float64} # - {name: BW, datatype: float64} # meta: !!omap -# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IW": 8, "MAXWMETH": 2}, +# - {survey_data: '{"observing": {"NORM_FRB": 17,"TOBS": 64.68,"MAX_IDT": 4096,"MAX_IW": 8, "MAXWMETH": 2}, # "telescope": {"BEAM": "CRACO_900", "DMMASK": "craco_900_mask.npy", # "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5, "FBAR": 906, # "TRES": 13.8, "FRES": 1.0, "THRESH": 1.01, "SNRTHRESH": 9.5}}'} diff --git a/zdm/data/Surveys/MeerTRAPcoherent.ecsv b/zdm/data/Surveys/MeerTRAPcoherent.ecsv index accc2daf..97ea8062 100644 --- a/zdm/data/Surveys/MeerTRAPcoherent.ecsv +++ b/zdm/data/Surveys/MeerTRAPcoherent.ecsv @@ -18,7 +18,7 @@ # - {survey_data: '{"observing": {"NORM_FRB": 1,"TOBS": 317.5}, # "telescope": {"BEAM": 
"MeerTRAP_coherent_log", # "BTHRESH": 0.25,"NBEAMS": 1,"NBINS": 5, -# "MAX_IDT": 1078, "FRES":0.836,"THRESH":0.069, +# "FRES":0.836,"THRESH":0.069, # "TRES": 0.30624, "FBAR":1284}}'} # schema: astropy-2.0 TNS BW DM DMG FBAR FRES NREP SNR SNRTHRESH THRESH TRES WIDTH Z diff --git a/zdm/data/optical/magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv b/zdm/data/optical/magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv new file mode 100644 index 00000000..eadfd47a --- /dev/null +++ b/zdm/data/optical/magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv @@ -0,0 +1,2143 @@ +# %ECSV 1.0 +# --- +# datatype: +# - {name: FRB20180301A, datatype: float64} +# - {name: FRB20180916B, datatype: float64} +# - {name: FRB20190520B, datatype: float64} +# - {name: FRB20201124A, datatype: float64} +# - {name: FRB20210410D, datatype: float64} +# - {name: FRB20121102A, datatype: float64} +# - {name: FRB20180924B, datatype: float64} +# - {name: FRB20181112A, datatype: float64} +# - {name: FRB20190102C, datatype: float64} +# - {name: FRB20190608B, datatype: float64} +# - {name: FRB20190611B, datatype: float64} +# - {name: FRB20190711A, datatype: float64} +# - {name: FRB20190714A, datatype: float64} +# - {name: FRB20191001A, datatype: float64} +# - {name: FRB20200430A, datatype: float64} +# - {name: FRB20200906A, datatype: float64} +# - {name: FRB20210117A, datatype: float64} +# - {name: FRB20210320C, datatype: float64} +# - {name: FRB20210807D, datatype: float64} +# - {name: FRB20211127I, datatype: float64} +# - {name: FRB20211203C, datatype: float64} +# - {name: FRB20211212A, datatype: float64} +# - {name: FRB20220105A, datatype: float64} +# - {name: z, datatype: float64} +# - {name: n>lim, datatype: int64} +# - {name: nlim n 0.: + self.smear_zgrid = self.smear_z(self.rates,self.zsigma) + self.rates=self.smear_zgrid def get_rates(self): """ @@ -558,6 +578,7 @@ def get_rates(self): rates[:,self.survey.max_idm+1:]=0. 
return rates + def calc_thresholds(self, F0:float, eff_table, bandwidth=1e9, @@ -713,13 +734,16 @@ def GenMCSample(self, N, Poisson=False): # Regen if the survey would not find this FRB frb = self.GenMCFRB(Emax_boost) + # This is a pretty naive method of generation. - while frb[1] > self.survey.max_dm: - print("Regenerating MC FRB with too high DM ",frb[1],self.survey.max_dm) - frb = self.GenMCFRB(Emax_boost) + if self.survey.max_dmeg is not None: + while frb[1] > self.survey.max_dm: + print("Regenerating MC FRB with too high DM ",frb[1],self.survey.max_dm) + frb = self.GenMCFRB(Emax_boost) sample.append(frb) - + + sample = np.array(sample) return sample @@ -767,9 +791,18 @@ def initMC(self): pzDM [:,setDMzero] = 0. # weighted pzDM - wb_fraction = (self.beam_o[i] * w * pzDM) + wb_fraction = (self.beam_o[i]* w * pzDM) pdv = np.multiply(wb_fraction.T, self.dV).T rate = pdv * self.sfr_smear + + # We do not implement photo-z smearing here + # the MC generates truth values of parameters + # smearing can be done very simple afterwards + # this smears the + #if self.survey.observing.Z_PHOTO > 1.: + # rate = self.smear_z(rate,self.survey.observing.Z_PHOTO) + # #rate=np.copy(self.smear_zgrid) + rates.append(rate) pwb[i * nw + j] = np.sum(rate) @@ -788,7 +821,7 @@ def initMC(self): # saves individal wxb zDM rates for sampling these distributions self.MCrates = rates - + # saves projections onto z-axis self.MCpzcs = pzcs @@ -854,7 +887,6 @@ def GenMCFRB(self, Emax_boost): # get p(z,DM) distribution for this b,w pzDM = self.MCrates[which] - pzc = self.MCpzcs[which] r = np.random.rand(1)[0] @@ -1230,4 +1262,90 @@ def chk_upd_param(self, param: str, vparams: dict, update=False): # return updated + def smear_z(self,array,zsigma): + """ + Smear a 2-D z-DM grid along the redshift axis to account for + photometric redshift uncertainty. + + When a survey uses photometric rather than spectroscopic redshifts, + the true redshift of each FRB host is uncertain. 
This method convolves + each column of the grid (i.e. each fixed-DM slice along the z axis) + with a Gaussian kernel whose standard deviation equals ``zsigma``, + redistributing probability across neighbouring redshift bins. + + The kernel is truncated at ``state.photo.sigma_width`` standard + deviations on each side (default 6σ), rounded up to an odd number of + bins so that it is centred exactly on zero. + + Parameters + ---------- + array : np.ndarray, shape (Nz, NDM) + Input 2-D grid with redshift along axis 0 and DM along axis 1. + zsigma : float + Photometric redshift uncertainty (1σ), in the same units as + ``self.zvals`` (i.e. dimensionless redshift). + + Returns + ------- + smear_zgrid : np.ndarray, shape (Nz, NDM) + Copy of ``array`` with each DM column convolved along the z axis + by the Gaussian smearing kernel. Boundary effects are handled with + ``np.convolve`` mode ``"same"``, so the output has the same shape + as the input. + + Notes + ----- + The kernel width in grid bins is ``zsigma / self.dz``. Values near the + grid edges will be underestimated because the convolution truncates to + zero outside the grid; for well-chosen grid extents this edge effect is + negligible. + + In ``calc_rates``, this method is called with ``zsigma`` taken from + ``self.survey.survey_data.observing.Z_PHOTO`` and applied to + ``self.rates`` after the FRB rate grid has been computed. + """ + r,c=array.shape + # get sigma in grid units + sigma=zsigma/(self.dz) + smear_size=int(self.state.photo.sigma_width*sigma) + smear_size=smear_size-smear_size%2+1 + smear_arr=np.linspace(-(smear_size-1)/2,(smear_size-1)//2,smear_size) + + # makes the approximation of taking the central value in the bin. 
+ smear_arr=np.exp(-(smear_arr**2)/(2*(sigma**2))) + #normalise + smear_arr/=np.sum(smear_arr) + + smear_zgrid=np.zeros([r,c]) + for i in range(c): + smear_zgrid[:,i]=np.convolve(array[:,i],smear_arr,mode="same") + + return smear_zgrid + + def construct_fz(self,ffile,zfile): + """ + linearly interpolates passed fz values onto own zvals array + + Args: + ffile (string): file containing fraction of hosts seen at given redshift + zfile(string): file containing z values of above + """ + fz = np.load(ffile) + z = np.load(zfile) + + from scipy.interpolate import interp1d + f=interp1d(z,fz,kind="linear",bounds_error=False) + newfz=f(self.zvals) + + # check for unphysical values + toolow = np.where(newfz < 0.) + newfz[toolow] = 0 + toohigh = np.where(newfz > 1.) + newfz[toohigh] = 1. + + self.fz = newfz + + + #np.save(path+"/"+name+"_fz",newfz) + #np.save(path+"/"+name+"_z",newz) diff --git a/zdm/io.py b/zdm/io.py index a75f8b60..7b561321 100644 --- a/zdm/io.py +++ b/zdm/io.py @@ -70,7 +70,7 @@ def savejson(filename, obj, overwrite=False, indent=None, easy_to_read=False, json.dump(obj, fh, indent=indent, **kwargs) ######### misc function to load some data - do we ever use it? 
########## - +#random def load_data(filename): if filename.endswith('.npy'): data=np.load(filename) diff --git a/zdm/iteration.py b/zdm/iteration.py index 1333ab77..d2384089 100644 --- a/zdm/iteration.py +++ b/zdm/iteration.py @@ -132,7 +132,6 @@ def get_log_likelihood(grid, s, norm=True, psnr=True, Pn=False, pNreps=True, pta else: print("Implementation is only completed for nD 1-3.") exit() - return llsum def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, @@ -1140,7 +1139,7 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal psnr += differential*survey.beam_o[i]*usew ###### Breaks p(snr,b,w) into three components, and saves them ##### - # this allows comoutations of psnr given b and w values, collapsing these over the dimensions of b and w + # this allows computations of psnr given b and w values, collapsing these over the dimensions of b and w if pwb: # psnr given beam, width, z,dm diff --git a/zdm/loading.py b/zdm/loading.py index 4b92056d..a51c1145 100644 --- a/zdm/loading.py +++ b/zdm/loading.py @@ -267,7 +267,7 @@ def surveys_and_grids(init_state=None, alpha_method=1, cos.set_cosmology(state) cos.init_dist_measures() - # get the grid of p(DM|z) + # get the grid of p(DMcosmic|z) zDMgrid, zvals,dmvals = misc_functions.get_zdm_grid( state, new=True, plot=False, method='analytic', nz=nz, ndm=ndm, zmax=zmax, dmmax=dmmax, diff --git a/zdm/misc_functions.py b/zdm/misc_functions.py index c15228f1..4fdfec75 100644 --- a/zdm/misc_functions.py +++ b/zdm/misc_functions.py @@ -1539,7 +1539,7 @@ def initialise_grids( # generates a DM mask # creates a mask of values in DM space to convolve with the DM grid mask = pcosmic.get_dm_mask( - dmvals, (state.host.lmean, state.host.lsigma), zvals, plot=True + dmvals, (state.host.lmean, state.host.lsigma), zvals, plot=False ) grids = [] for survey in surveys: diff --git a/zdm/optical.py b/zdm/optical.py index 26ac15cd..5b1bbd01 100644 --- a/zdm/optical.py +++ b/zdm/optical.py @@ 
-1,12 +1,58 @@ """ -This library contains routines that interact with -the FRB/astropath module and (optical) FRB host galaxy -information. - -It includes the class "host_model" for describing the -intrinsic FRB host galaxy distribution, associated functions, -and the approximate fraction of -detectable FRBs from Marnoch et al (https://doi.org/10.1093/mnras/stad2353) +Optical FRB host galaxy models and PATH interface for zdm. + +This module connects zdm redshift-DM grids with the PATH +(Probabilistic Association of Transients to their Hosts) algorithm by +providing physically motivated priors on FRB host galaxy apparent +magnitudes, p(m_r | DM_EG). + +Architecture +------------ +The module is built around a two-layer design: + +**Host magnitude models** each describes the intrinsic absolute +magnitude distribution of FRB host galaxies, p(M_r), and can convert +it to an apparent magnitude distribution p(m_r | z) at a given +redshift. Three models are provided: + +- ``simple_host_model``: parametric histogram of p(M_r) with N free + amplitudes (default 10), interpolated linearly or via spline. An + optional power-law k-correction can be included. N (or N+1) + free parameters. + +- ``loudas_model``: precomputed p(m_r | z) tables from Nick Loudas, + constructed by weighting galaxies by stellar mass or star-formation + rate. Interpolated between tabulated redshift bins with a + luminosity-distance shift. Single free parameter ``fSFR`` sets the + SFR/mass mixing fraction. + +- ``marnoch_model``: zero-parameter model. Fits a Gaussian to the + r-band magnitude distribution of known CRAFT ICS host galaxies from + Marnoch et al. 2023 (MNRAS 525, 994), using cubic splines for the + redshift-dependent mean and standard deviation. + +**``model_wrapper``** a survey-independent wrapper around any host +model. Given a zdm ``Grid`` and an observed DM_EG, it convolves +p(m_r | z) with the zdm p(z | DM_EG) posterior to produce a +DM-informed apparent magnitude prior for PATH. 
It also estimates +P_U, the prior probability that the true host is below the survey +detection limit. + +Typical usage +------------- +:: + + model = opt.marnoch_model() + wrapper = opt.model_wrapper(model, grid.zvals) + wrapper.init_path_raw_prior_Oi(DMEG, grid) # DM-specific initialisation + PU = wrapper.estimate_unseen_prior() + # pathpriors.USR_raw_prior_Oi is now set automatically by init_path_raw_prior_Oi + +Module-level data +----------------- +``frblist`` : list of str + TNS names of CRAFT ICS FRBs for which PATH optical data are + available (used by the scripts in ``zdm/scripts/Path/``). """ @@ -15,24 +61,471 @@ from zdm import cosmology as cos from zdm import optical_params as op from scipy.interpolate import CubicSpline +from scipy.interpolate import make_interp_spline +from scipy.stats import norm import os from importlib import resources import pandas +import h5py + +from astropath import priors as pathpriors +#import astropath.priors as pathpriors + + +################################################################### +############ Routines associated with Nick's model ################ +################################################################### + + +class marnoch_model: + """ + Class initiates a model based on Lachlan Marnoch's predictions + for FRB host galaxy visibility in + https://ui.adsabs.harvard.edu/abs/2023MNRAS.525..994M/abstract + Here, we assume that host galaxy magnitudes have a normal + distribution, with mean and standard deviation given by + L. Marnoch's data. + """ + + def __init__(self,OpticalState=None): + """ + Initialises the model. There are no variables here. + + Args: + OpticalState: allows the model to refer to an optical state. + However, the model is independent of that state. 
+ """ + + # uses the "simple hosts" descriptor + if OpticalState is None: + OpticalState = op.OpticalState() + self.OpticalState = OpticalState + self.opstate = None + + # loads the dataset + self.load_data() + + # extracts subic splines for mean and std dev + self.process_rbands() + + + def load_data(self): + """ + Loads the Marnoch et al data on r-band magnitudes from FRB hosts + """ + from astropy.table import Table + datafile="magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv" + infile = os.path.join(resources.files('zdm'), 'data', 'optical', datafile) + table = Table.read(infile, format='ascii.ecsv') + self.table = table + + def process_rbands(self): + """ + Build cubic spline fits to the mean and rms of p(m_r) as a function of z. + + Reads the per-FRB r-band magnitude columns from ``self.table``, + computes their mean (``Rbar``) and sample standard deviation (``Rrms``) + across all FRBs at each tabulated redshift, then fits two cubic splines: + + - ``self.sbar``: CubicSpline interpolating the mean apparent magnitude + as a function of redshift. + - ``self.srms``: CubicSpline interpolating the rms scatter as a + function of redshift. + + These splines are subsequently used by ``get_pmr_gz`` to evaluate + the Gaussian p(m_r | z) at arbitrary redshifts. 
+ """ + + table = self.table + colnames = table.colnames + # gets FRBs + frblist=[] + for name in colnames: + if name[0:3]=="FRB": + frblist.append(name) + zlist = table["z"] + nz = zlist.size + nfrb = len(frblist) + Rmags = np.zeros([nfrb,nz]) + + for i,frb in enumerate(frblist): + + Rmags[i,:] = table[frb] + + # gets mean and rms + Rbar = np.average(Rmags,axis=0) + Rrms = (np.sum((Rmags - Rbar)**2,axis=0)/(nfrb-1))**0.5 + + # creates cubic spline fits to mean and rms of m_r as a function of z + self.sbar = CubicSpline(zlist,Rbar) + self.srms = CubicSpline(zlist,Rrms) + + #return Rbar,Rrms,zlist,sbar,srms + + def get_pmr_gz(self,mrbins,z): + """ + Return the apparent magnitude probability distribution p(m_r | z). + + Evaluates a Gaussian distribution whose mean and standard deviation + are obtained from the cubic splines fit in ``process_rbands``, + and integrates it over the provided magnitude bins. + + This model has no free parameters; the Gaussian moments are fully + determined by the Marnoch et al. 2023 host galaxy data. + + Parameters + ---------- + mrbins : array-like of float, length N+1 + Edges of the apparent magnitude bins over which to compute the + probability. The output has length N (one value per bin). + z : float + Redshift at which to evaluate the magnitude distribution. + + Returns + ------- + pmr : np.ndarray, length N + Probability in each magnitude bin (sums to ≤ 1; may be less + than unity if the Gaussian extends beyond the bin range). + """ + + mean = self.sbar(z) + rms = self.srms(z) + + deviates = (mrbins-mean)/rms + cprobs = norm.cdf(deviates) + pmr = cprobs[1:] - cprobs[:-1] + + return pmr + + + +class loudas_model: + """ + This class initiates a model based on Nick Loudas's model of + galaxy magnitudes as a function of redshift. The underlying + model is a description of galaxies as a function of + stellar mass and star-formation rate as a function of redshift. 
+ + """ + + def __init__(self,OpticalState=None,fname='p_mr_distributions_dz0.01_z_in_0_1.2.h5',data_dir=None,verbose=False): + """ + Initialise the Loudas model, loading precomputed p(m_r | z) tables. + + Args: + OpticalState (OpticalState, optional): optical parameter state. A + default ``OpticalState`` is created if not provided. + fname (str): HDF5 filename containing the Loudas p(m_r | z) tables. + Defaults to ``'p_mr_distributions_dz0.01_z_in_0_1.2.h5'``. + data_dir (str, optional): directory containing ``fname``. Defaults + to the package data directory ``zdm/data/optical/``. + verbose (bool): if True, print progress messages. Defaults to False. + """ + + # uses the "simple hosts" descriptor + if OpticalState is None: + OpticalState = op.OpticalState() + self.OpticalState = OpticalState + + #extract the correct optical substate from the opstate + self.opstate = self.OpticalState.loudas + + self.fsfr = self.opstate.fSFR + + + # checks that cosmology is initialised + if not cos.INIT: + cos.init_dist_measures() + + # gets base input directory. In future, this may be expanded + if data_dir is None: + data_dir = os.path.join(resources.files('zdm'), 'data', 'optical') + + # load data and its properties + self.init_pmr(fname,data_dir) + + # initialises cubic splines for faster speedups + self.init_cubics() + + def init_pmr(self,fname,data_dir): + """ + Loads p(mr|z) distributions from Nick Loudas. Note - these are + actually distributions in apparent magnitude mr. + + Mostly, this wraps around Nick's code "load_p_mr_distributions". + I've kept them separate to distinguish between his code and mine -CWJ. + + """ + ####### loading p(mr) distributions ########## + zbins, rmag_centres, p_mr_sfr, p_mr_mass = self.load_p_mr_distributions( + data_dir, fname = fname) + + # zbins represent ranges. 
We also calculate z-bin centres + self.drmag = rmag_centres[1] - rmag_centres[0] + self.zbins = zbins + self.nzbins = zbins.size-1 + self.czbins = 0.5*(self.zbins[1:] + self.zbins[:-1]) + self.logzbins = np.log10(zbins) + self.clogzbins = 0.5*(self.logzbins[1:] + self.logzbins[:-1]) + self.rmags = rmag_centres # centres of rmag bins + self.p_mr_sfr = p_mr_sfr # sfr-weighted p_mr + self.p_mr_mass = p_mr_mass # mass-weighted p_mr + + + # we have now all the data we need! + + def init_cubics(self): + """ + initialises cubic splines that interpolate in mr. For later use (speedup!) + """ + + sfr_splines = [] + mass_splines = [] + for i in np.arange(self.nzbins): + sfr_spline = make_interp_spline(self.rmags,self.p_mr_sfr[i],k=1) + sfr_splines.append(sfr_spline) + + mass_spline = make_interp_spline(self.rmags,self.p_mr_mass[i],k=1) + mass_splines.append(mass_spline) + + self.mass_splines = mass_splines + self.sfr_splines = sfr_splines + + def get_pmr_gz(self,mrbins,z): + """ + Return the apparent magnitude probability distribution p(m_r | z). + + Interpolates between the two nearest tabulated redshift bins (in + log-z space), applying a luminosity-distance shift to each tabulated + p(m_r) before combining them. The mass- and SFR-weighted distributions + are mixed according to ``self.fsfr`` (set via ``init_args``). + + The result is normalised so that the bin probabilities sum to unity + over the full magnitude range, provided the distribution does not + extend significantly beyond ``mrbins``. + Parameters + ---------- + mrbins : array-like of float, length N+1 + Edges of the apparent magnitude bins. Output has length N. + z : float + Redshift at which to evaluate the distribution. Values outside + the tabulated range are extrapolated from the nearest edge bin. -class host_model: + Returns + ------- + pmr : np.ndarray, length N + Probability in each apparent magnitude bin (sums to ≤ 1). 
+ """ + + fsfr = self.fsfr + + # gets interpolation coefficients + lz = np.log10(z) + if lz < self.clogzbins[0]: + # sets values equal to that of smallest bin, to avoid interpolation + i1=0 + i2=1 + k1=1. + k2=0. + elif lz > self.clogzbins[-1]: + i1=self.nzbins-2 + i2=self.nzbins-1 + k1=0 + k2=1. + else: + i1 = np.where(lz > self.clogzbins)[0][-1] # gets lowest value where zs are larger + i2=i1+1 + k2 = (lz-self.clogzbins[i1])/(self.clogzbins[i2]-self.clogzbins[i1]) + k1 = 1.-k2 + + z1=self.czbins[i1] + z2=self.czbins[i2] + + # the mr distributions are apparent magnitudes + # hence, we have to interpolate between z-bins using first-order shifting + # this is *very* important for low values of z + + DL = cos.dl(z) + DL1 = cos.dl(z1) + DL2 = cos.dl(z2) + + # calculates shifts in logarithm. Still shifts when z is lower or higher than m_r + # note: a factor of 2 in DL means a factor of 4 in luminosity, meaning + # 5/2 log10(4) in mr = 5 log10(2). + dmr1 = 5.*np.log10(DL/DL1) # will be a positive shift + dmr2 = 5.*np.log10(DL/DL2) # will be a negative shift + + + mr_centres = (mrbins[:-1]+mrbins[1:])/2. 
+ + # will interpolate the values at *lower* magnitudes, effectively shifting distribution up + p_mr_mass1 = self.mass_splines[i1](mr_centres - dmr1) + + # will interpolate the values at *higher* magnitudes, effectively shifting distribution down + p_mr_mass2 = self.mass_splines[i2](mr_centres - dmr2) + + + # will interpolate the values at *lower* magnitudes, effectively shifting distribution up + p_mr_sfr1 = self.sfr_splines[i1](mr_centres - dmr1) + # will interpolate the values at *higher* magnitudes, effectively shifting distribution down + p_mr_sfr2 = self.sfr_splines[i2](mr_centres - dmr2) + + # distribution for that redshift assuming mass weighting + pmass = k1*p_mr_mass1 + k2*p_mr_mass2 + + # just left here for testing purposes + if False: + print("Redshift bins are ",z,z1,z2) + print("Luminosity distances are ",DL,DL1,DL2) + print("shifts are therefore ",dmr1,dmr2) + + # generate an example plot showing interpolation + plt.plot(self.rmags,p_mr_mass1,linestyle="-",label="scaled from z0") + plt.plot(self.rmags,p_mr_mass2,linestyle="--",label="scaled from z1") + plt.plot(self.rmags,self.p_mr_mass[i1],linestyle=":",label="z0") + plt.plot(self.rmags,self.p_mr_mass[i2],linestyle=":",label="z1") + plt.legend() + plt.tight_layout() + plt.show() + exit() + + # distribution for that redshift assuming sfr weighting + psfr = k1*p_mr_sfr1 + k2*p_mr_sfr2 + + # mean weighted distribution + pmr = pmass*(1.-fsfr) + psfr*fsfr + + # normalise by relative bin width - recall, bins should sum to unity + pmr *= (mrbins[1]-mrbins[0])/self.drmag + + # remove negative probabilities - set to zero, and re-normalise + prevsum = np.sum(pmr) + bad = np.where(pmr < 0.)[0] + pmr[bad] = 0. + newsum = np.sum(pmr) + pmr *= prevsum / newsum + + return pmr + + def load_p_mr_distributions(self,data_dir,fname: str = 'p_mr_distributions_dz0.01_z_in_0_1.2.h5') -> tuple: + """ + This code originally written by Nick Loudas. Used with permission + + Load the p(mr|z) distributions from an HDF5 file. 
+ Args: + fname (str): Input filename. + output_dir (str): Directory where the file is stored. Optional (otherwise defaults as below) + Returns: + zbins (np.array): Redshift bin edges. + rmag_centers (np.array): Centers of r-band magnitude bins. + p_mr_sfr (np.array): p(mr|z) for SFR-weighted population. Shape: (len(zbins) - 1, + rmag_resolution). rmag_resolution(=len(rmag_centers)) is fixed across redshift bins. + p_mr_mass (np.array): p(mr|z) for Mass-weighted population. Shape: (len(zbins) - 1, + rmag_resolution). rmag_resolution(=len(rmag_centers)) is fixed across redshift bins. + Note: + The PDF in m_r within a given redshift bin [z1,z2] has been computed at the right edge of the bin (z = z2). + """ + infile = os.path.join(data_dir,fname) + with h5py.File(infile, 'r') as hf: + zbins = np.array(hf['zbins']) + zbins = zbins[1:] # first bin is "extra" for "reasons" + rmag_centers = np.array(hf['rmag_centers']) + p_mr_sfr = np.array(hf['p_mr_sfr']) + p_mr_mass = np.array(hf['p_mr_mass']) + + # normalise these probabilities such that the bins sum to unity + p_mr_sfr = (p_mr_sfr.T / np.sum(p_mr_sfr,axis=1)).T + p_mr_mass = (p_mr_mass.T / np.sum(p_mr_mass,axis=1)).T + + print(f"p(mr|z) distributions loaded successfully from 'p_mr_dists/{fname}'") + n_redshift_bins = len(zbins) - 1 + return zbins, rmag_centers, p_mr_sfr, p_mr_mass + + def give_p_mr_mass(self,z: float): + """ + Return p(m_r | z) for the stellar-mass-weighted host population. + + Uses a nearest-bin lookup (no interpolation) in the tabulated redshift + grid. For interpolated results with luminosity-distance shifting, use + ``get_pmr_gz`` with ``fSFR=0`` instead. + + Args: + z (float): Redshift value. + + Returns: + np.ndarray: p(m_r | z) values at the tabulated r-band magnitude + centres (``self.rmags``), normalised to sum to unity. 
+ """ + # Find the appropriate redshift bin index + idx = np.clip(np.searchsorted(self.zbins, z) - 1, 0, n_redshift_bins - 1) + return self.p_mr_mass[idx] + + def give_p_mr_sfr(self,z: float): + """ + Return p(m_r | z) for the star-formation-rate-weighted host population. + + Uses a nearest-bin lookup (no interpolation) in the tabulated redshift + grid. For interpolated results with luminosity-distance shifting, use + ``get_pmr_gz`` with ``fSFR=1`` instead. + + Args: + z (float): Redshift value. + + Returns: + np.ndarray: p(m_r | z) values at the tabulated r-band magnitude + centres (``self.rmags``), normalised to sum to unity. + """ + # Find the appropriate redshift bin index + idx = np.clip(np.searchsorted(self.zbins, z) - 1, 0, n_redshift_bins - 1) + return self.p_mr_sfr[idx] + + def init_args(self,fSFR): + """ + Set the SFR/mass mixing fraction for the Loudas model. + + Args: + fSFR (float or array-like of length 1): fraction of FRB hosts + that trace star-formation rate. ``fSFR=0`` gives a purely + mass-weighted population; ``fSFR=1`` gives a purely + SFR-weighted population. Intermediate values linearly mix + the two. If an array is passed, only the first element is used. + """ + # for numerical purposes, fSFR may have to be a vector + if hasattr(fSFR,'__len__'): + fSFR = fSFR[0] + self.fsfr = fSFR + + def init_priors(self,zlist): + """ + Generates magniude prior distributions for a list of redshifts + This allows faster interpolation later. + + Currently, this is not used! + """ + print("WARNING: redundant init priors!!!!!!") + exit() + mass_priors = np.zeros([zlist.size,self.nmr]) + sfr_priors = np.zeros([zlist.size,self.nmr]) + for i,z in enumerate(zlist): + mass_priors[i,:] = self.get_p_mr(z,0.) + sfr_priors[i,:] = self.get_p_mr(z,1.) + self.mass_priors = mass_priors + self.sfr_priors = sfr_priors + +class simple_host_model: """ A class to hold information about the intrinsic properties of FRB - host galaxies. 
Eventually, this should be expanded to be a - meta-class with different internal models. But for now, it's - just a simple one + host galaxies. This is a simple but generic model. Ingredients are: A model for describing the intrinsic distribution of host galaxies. This model must be described by some set of parameters, and be able to return a prior as a function of intrinsic host galaxy magnitude. - This model is initialised via opstate.AbsModelID + This model is initialised via opstate.AbsModelID. + Here, it is just 10 parameters at different absolute + magnitudes, with linear/spline interpolation A model for converting absolute to apparent host magnitudes. This is by defult an apparent r-band magnitude, though @@ -40,18 +533,9 @@ class host_model: Internally, this class initialises: An array of absolute magnitudes, which get weighted according - to the host model. - Internal variables associated with this are prefaced "Model" - - An array of apparent magnitudes, which is used to compare with - host galaxy candidates - Internal variables associated with this are prefaced "App" + to the host model. Internal variables associated with this + are prefaced "Model" - Arrays mapping intrinsic to absolute magnitude as a function - of redshift, to allow quick estimation of p(apparent_mag | DM) - for a given FRB survey with many FRBs - Internal variables associated with this are prefaced "Abs" - Note that while this class describes the intrinsic "magnitudes", really magnitude here is a proxy for whatever parameter is used to intrinsically describe FRBs. However, only 1D descriptions are @@ -59,111 +543,114 @@ class host_model: evolution, and 2D descriptions (e.g. mass, SFR) at any given redshift. """ - def __init__(self,opstate=None,verbose=False): + def __init__(self,OpticalState=None,verbose=False): """ - Class constructor - + Initialise the simple host magnitude model. 
+ Args: - opstate (class: Hosts, optional): class defining parameters - of optical state model - verbose (bool, optional): to be verbose y/n - + OpticalState (OpticalState, optional): optical parameter state + providing model configuration (magnitude ranges, number of + bins, interpolation scheme, k-correction flag). A default + ``OpticalState`` is created if not provided. + verbose (bool, optional): if True, print which sub-models are + being initialised. Defaults to False. """ - if opstate is None: - opstate = op.Hosts() + # uses the "simple hosts" descriptor + if OpticalState is None: + self.OpticalState = op.OpticalState() + else: + self.OpticalState = OpticalState + self.opstate = self.OpticalState.simple + # checks that cosmology is initialised + if not cos.INIT: + cos.init_dist_measures() - if opstate.AppModelID == 0: + if self.opstate.AppModelID == 0: if verbose: print("Initialising simple luminosity function") - # must take arguments of (absoluteMag,z) + # must take arguments of (absoluteMag,k,z) self.CalcApparentMags = SimpleApparentMags + self.CalcAbsoluteMags = SimpleAbsoluteMags + elif self.opstate.AppModelID == 1: + if verbose: + print("Initialising k-corrected luminosity function") + # must take arguments of (absoluteMag,k,z) + self.CalcApparentMags = SimplekApparentMags + self.CalcAbsoluteMags = SimplekAbsoluteMags else: - raise ValueError("Model ",opstate.AppModelID," not implemented") + raise ValueError("Model ",self.opstate.AppModelID," not implemented") - if opstate.AbsModelID == 0: + if self.opstate.AbsModelID == 0: if verbose: print("Describing absolute mags with N independent bins") - elif opstate.AbsModelID == 1: + elif self.opstate.AbsModelID == 1: + if verbose: + print("Describing absolute mags with linear interpoilation of N points") + elif self.opstate.AbsModelID == 2: if verbose: print("Describing absolute mags with spline interpoilation of N points") + elif self.opstate.AbsModelID == 3: + if verbose: + print("Describing absolute mags 
with spline interpoilation of N log points") else: - raise ValueError("Model ",opstate.AbsModelID," not implemented") + raise ValueError("Model ",self.opstate.AbsModelID," not implemented") - self.AppModelID = opstate.AppModelID - self.AbsModelID = opstate.AbsModelID + self.AppModelID = self.opstate.AppModelID + self.AbsModelID = self.opstate.AbsModelID + - self.opstate = opstate self.init_abs_bins() self.init_model_bins() - self.init_app_bins() - self.init_abs_prior() - - self.ZMAP = False # records that we need to initialise this - - ############################################################# - ################## Initialisation Functions ################# - ############################################################# - - def init_abs_prior(self): - """ - Initialises prior on absolute magnitude of galaxies according to the method. - - """ + # could perhaps use init args for this? if self.opstate.AbsPriorMeth==0: # uniform prior in log space of absolute magnitude - Absprior = np.full([self.ModelNBins],1./self.NAbsBins) + AbsPrior = np.full([self.ModelNBins],1./self.NAbsBins) else: # other methods to be added as required raise ValueError("Luminosity prior method ",self.opstate.AbsPriorMeth," not implemented") # enforces normalisation of the prior to unity - Absprior /= np.sum(Absprior) - self.AbsPrior = Absprior + self.AbsPrior = AbsPrior/np.sum(AbsPrior) + # k-correction + self.k = self.opstate.k # this maps the weights from the parameter file to the absoluate magnitudes use # internally within the program. We now initialise this during an "init" - self.AbsMagWeights = self.init_abs_mag_weights() + self.init_abs_mag_weights() - # renormalises the weights, so all internal apparent mags sum to unit - # include this step in the init routine perhaps? 
- self.AbsMagWeights /= np.sum(self.AbsMagWeights) - - def init_app_bins(self): + # the below is done for the wrapper function + #self.ZMAP = False # records that we need to initialise this + + def get_args(self): """ - Initialises bins in apparent magnitude - It uses these to calculate priors for any given host galaxy magnitude. - This is a very simple set of uniformly log-spaced bins in magnitude space, - and linear interpolation is used between them. + function to return args as a vector in the form of init_args """ - self.Appmin = self.opstate.Appmin - self.Appmax = self.opstate.Appmax - self.NAppBins = self.opstate.NAppBins - - # this creates the bin edges - self.AppBins = np.linspace(self.Appmin,self.Appmax,self.NAppBins+1) - dAppBin = self.AppBins[1] - self.AppBins[0] - self.AppMags = self.AppBins[:-1] + dAppBin/2. - self.dAppmag = dAppBin - + if self.opstate.AppModelID == 0: + args = self.AbsPrior + elif self.opstate.AppModelID == 1: + args = np.zeros([self.ModelNBins+1]) + args[1:] = self.AbsPrior + args[0] = self.k + return args + def init_abs_bins(self): """ Initialises internal array of absolute magnitudes This is a simple set of uniformly log-spaced bins in terms of absolute magnitude, which the absolute magnitude model gets - projected onto + projected onto. """ # shortcuts Absmin = self.opstate.Absmin Absmax = self.opstate.Absmax NAbsBins = self.opstate.NAbsBins - self.Absmin = Absmin self.Absmax = Absmax self.NAbsBins = NAbsBins @@ -176,6 +663,33 @@ def init_abs_bins(self): self.MagBins = MagBins self.dMag = dMag self.AbsMags = AbsMags + + + def init_args(self,Args): + """ + Initialises prior on absolute magnitude of galaxies according to the method. 
+ + Args: + - Args (list of floats): The prior on absolute magnitudes + to set for this model + IF AppModelID == 1, then interpret the first as the k-correction + + """ + # Eventually, incorporate the AbsPrior vector into SimpleParams + #self.opstate = OpticalState.SimpleParams + + if self.opstate.AppModelID == 0: + AbsPrior=Args + elif self.opstate.AppModelID == 1: + AbsPrior=Args[1:] + self.k = Args[0] + + # enforces normalisation of the prior to unity + self.AbsPrior = AbsPrior/np.sum(AbsPrior) + + # this maps the weights from the parameter file to the absoluate magnitudes use + # internally within the program. We now initialise this during an "init" + self.init_abs_mag_weights() def init_model_bins(self): """ @@ -196,81 +710,111 @@ def init_model_bins(self): dbin = (self.Absmax - self.Absmin)/ModelNBins ModelBins = np.linspace(self.Absmin+dbin/2.,self.Absmax-dbin/2.,ModelNBins) - elif self.AbsModelID == 1: + else: # bins on edges ModelBins = np.linspace(self.Absmin,self.Absmax,ModelNBins) self.ModelBins = ModelBins self.dModel = ModelBins[1]-ModelBins[0] - def init_zmapping(self,zvals): - """ - For a set of redshifts, initialise mapping - between intrinsic magnitudes and apparent magnitudes - - This routine only needs to be called once, since the model - to convert absolute to apparent magnitudes is fixed - - It is not set automatically however, and needs to be called - with a set of z values. This is all for speedup purposes. - - Args: - zvals (np.ndarray, float): array of redshifts over which - to map absolute to apparent magnitudes. + def get_pmr_gz(self,mrbins,z): """ - - # records that this has been initialised - self.ZMAP = True - - # mapping of apparent to absolute magnitude - self.zmap = self.CalcApparentMags(self.AbsMags,zvals) - self.zvals = zvals - self.NZ = self.zvals.size - - self.init_maghist() - - def init_maghist(self): + Return the apparent magnitude probability distribution p(m_r | z). 
+ + Converts each apparent magnitude bin centre back to an absolute + magnitude using ``CalcAbsoluteMags``, then linearly interpolates + the absolute magnitude weight array (``self.AbsMagWeights``) to + obtain a probability density at each bin. + + Parameters + ---------- + mrbins : np.ndarray of float, length N+1 + Edges of the apparent magnitude bins. The output has length N, + with one probability value per bin centre. + z : float + Redshift at which to evaluate the distribution. + + Returns + ------- + pmr : np.ndarray, length N + Probability density at each apparent magnitude bin centre. + Values at the edges of the absolute magnitude range are + clamped to the nearest valid bin rather than extrapolated. + + Notes + ----- + The returned values are NOT renormalised to sum to unity; the sum + may be less than one if some absolute magnitudes lie outside the + model range ``[Absmin, Absmax]``. This is intentional: the + shortfall represents probability mass for hosts too faint or too + bright to appear in the apparent magnitude range. """ - Initialises the array mapping redshifts and absolute magnitudes - to redshift and apparent magnitude - - Calculates the internal maghist array, of size self.NAppBins X self.NZ - No return value. + old = False + if old: + # mapping of apparent to absolute magnitude + if self.opstate.AppModelID == 0: + mrvals = self.CalcApparentMags(self.AbsMags,z) # works with scalar z + elif self.opstate.AppModelID == 1: + mrvals = self.CalcApparentMags(self.AbsMags,self.k,z) # works with scalar z - """ - - # for current model, calculate weighted histogram of apparent magnitude - # for each redshift. 
Done by converting intrinsic to apparent for each z, - # then suming up the associated weights - maghist = np.zeros([self.NAppBins,self.NZ]) - for i in np.arange(self.NZ): # creates weighted histogram of apparent magnitudes, # using model weights from wmap (which are fixed for all z) - hist,bins = np.histogram(self.zmap[:,i],weights=self.AbsMagWeights,bins=self.AppBins) + hist,bins = np.histogram(mrvals,weights=self.AbsMagWeights,bins=mrbins) - # # NOTE: these should NOT be re-normalised, since the normalisation reflects - # true magnitudes which fall off the apparent magnitude histogram. - maghist[:,i] = hist + #smoothing function - just to flatten the params + NS=10 + smoothf = self.gauss(mrvals[0:NS] - np.average(mrvals[0:NS])) + smoothf /= np.sum(smoothf) + smoothed = np.convolve(hist,smoothf,mode="same") - self.maghist = maghist - - def reinit_model(self): + #smoothed=hist. Not sure yet if smoothing is the right thing to do! + pmr = smoothed + else: + # probability density at bin centre + mrbars = (mrbins[:-1] + mrbins[1:])/2. + + # get absolute magnitudes corresponding to these apparent magnitudes + if self.opstate.AppModelID == 0: + Mrvals = self.CalcAbsoluteMags(mrbars,z) # works with scalar z + elif self.opstate.AppModelID == 1: + Mrvals = self.CalcAbsoluteMags(mrbars,self.k,z) # works with scalar z + + # linear interpolation + # note that dMr = dmr, so we just map probability densities + + + kmag2s = (Mrvals - self.Absmin)/self.dMag + imag1s = np.floor(kmag2s).astype('int') + kmag2s -= imag1s + kmag1s = 1.-kmag2s + imag2s = imag1s+1 + + # guards against things that are too low + toolow = np.where(imag1s < 0)[0] + imag1s[toolow]=0 + kmag1s[toolow]=1 + imag2s[toolow]=1 + kmag2s[toolow]=0 + + # guards against things that are too high + toohigh = np.where(imag2s >= self.NAbsBins)[0] + imag1s[toohigh]=self.NAbsBins-2 + kmag1s[toohigh]=0 + imag2s[toohigh]=self.NAbsBins-1 + kmag2s[toohigh]=1. 
+ + pmr = self.AbsMagWeights[imag1s] * kmag1s + self.AbsMagWeights[imag2s] * kmag2s + + # # NOTE: these should NOT be re-normalised, since the normalisation reflects + # true magnitudes which fall off the apparent magnitude histogram. + return pmr + + def gauss(self,x,mu=0,sigma=0.1): """ - Re-initialises all internal info which depends on the optical - param model. It assumes that the changes have been implemented in - self.AbsPrior + simple Gaussian smoothing function """ - - # this maps the weights from the parameter file to the absoluate magnitudes use - # internally within the program. We now initialise this during an "init" - self.AbsMagWeights = self.init_abs_mag_weights() - - # renormalises the weights, so all internal apparent mags sum to unity - # include this step in the init routine perhaps? - self.AbsMagWeights /= np.sum(self.AbsMagWeights) - - self.init_maghist() + return np.exp(-0.5*(x-mu)**2/sigma**2) def init_abs_mag_weights(self): """ @@ -294,22 +838,256 @@ def init_abs_mag_weights(self): weights = self.AbsPrior[self.imags] elif self.AbsModelID == 1: + # linear interpolation + # gives mapping from model bins to mag bins + weights = np.interp(self.AbsMags,self.ModelBins,self.AbsPrior) + + elif self.AbsModelID == 2: # As above, but with spline interpolation of model. # coefficients span full range cs = CubicSpline(self.ModelBins,self.AbsPrior) weights = cs(self.AbsMags) + # ensures no negatives toolow = np.where(weights < 0.) weights[toolow] = 0. + # ensures that if everything is zero above/below a point, so is the interpolation + iFirstNonzero = np.where(self.AbsPrior > 0.)[0][0] + if iFirstNonzero > 0: + toolow = np.where(self.AbsMags < self.ModelBins[iFirstNonzero -1]) + weights[toolow] = 0. + iLastNonzero = np.where(self.AbsPrior > 0.)[0][-1] + if iLastNonzero < self.AbsPrior.size - 1: + toohigh = np.where(self.AbsMags > self.ModelBins[iLastNonzero+1]) + weights[toohigh] = 0. 
+ elif self.AbsModelID == 3: + # As above, but splines interpolate in *log* space + cs = CubicSpline(self.ModelBins,self.AbsPrior) + weights = cs(self.AbsMags) + weights = 10**weights + + else: + raise ValueError("This weighting scheme not yet implemented") + + + # renormalises the weights, so all internal apparent mags sum to unit + self.AbsMagWeights = weights / np.sum(weights) + + return + + +class model_wrapper: + """ + Generic functions applicable to all models. + + The program flow is to initialise with a host model ("model"), + then given arrays of Mr and zvalues, pre-calculate an array + of p(Mr|z), and then for individual host galaxies with a + p(z|DM) distribution, be able to return priors for PATH. + + Internally, the code uses an array of apparent magnitudes, + which is used to compare with host galaxy candidates. + Internal variables associated with this are prefaced "App" + + The Arrays mapping intrinsic to absolute magnitude as a function + of redshift, to allow quick estimation of p(apparent_mag | DM) + for a given FRB survey with many FRBs + Internal variables associated with this are prefaced "Abs" + + + The workflow is: + -init with a model class and array of z values. This sets + absolute magnitude bins. The z values should correspond + to those from a grid object. + Initialisation primarily calls p(Mr|z) repeatedly for all internal + Mr and z values, to allow fast evaluation in the future + - set up PATH functions to point to this array: + pathpriors.USR_raw_prior_Oi = wrapper.path_raw_prior_Oi + - initialise this class for a given init_path_raw_prior_Oi(DM,grid). + This calculates magnitude priors given p(z|DM) (grid) + and p(mr|z) (host model). + + """ + def __init__(self,model,zvals): + """ + Initialises model wrapper. 
+ + + Args: + model (class object): Model is one of the host model class objects + that can calculate p(Mr|z) + zvals (np.array): redshift values corresponding to grid object + opstate (class optical): state containing optical info + + """ + + # higher level state defining optical parameters + self.OpticalState = model.OpticalState + + #parameters defining chance of identifying a galaxy with magnitude m + self.pU_mean = self.OpticalState.id.pU_mean + self.pU_width = self.OpticalState.id.pU_width + + # specific substate of the model + self.opstate = model.opstate + + self.model = model # checks the model has required attributes + + + # initialise bins in apparent magnitude + self.init_app_bins() + + self.init_zmapping(zvals) + + + def init_app_bins(self): + """ + Initialises bins in apparent magnitude + It uses these to calculate priors for any given host galaxy magnitude. + This is a very simple set of uniformly log-spaced bins in magnitude space, + and linear interpolation is used between them. + """ + + + self.Appmin = self.OpticalState.app.Appmin + self.Appmax = self.OpticalState.app.Appmax + self.NAppBins = self.OpticalState.app.NAppBins + + # this creates the bin edges + self.AppBins = np.linspace(self.Appmin,self.Appmax,self.NAppBins+1) + dAppBin = self.AppBins[1] - self.AppBins[0] + self.AppMags = self.AppBins[:-1] + dAppBin/2. + self.dAppmag = dAppBin + + def init_zmapping(self,zvals): + """ + For a set of redshifts, initialise mapping + between intrinsic magnitudes and apparent magnitudes + + This routine only needs to be called once, since the model + to convert absolute to apparent magnitudes is fixed + + It is not set automatically however, and needs to be called + with a set of z values. This is all for speedup purposes. + + Args: + zvals (np.ndarray, float): array of redshifts over which + to map absolute to apparent magnitudes. 
+ """ + + self.zvals=zvals + + # we aim to produce a grid of p(z,m_r) for rapid convolution + # with a p(z) array + self.nz = zvals.size + + p_mr_z = np.zeros([self.NAppBins,self.nz]) + + for i,z in enumerate(zvals): + # use the model to calculate p(mr|z) for range of z-values + # this is then stored in an array. + # NOTE! This could become un-normalised due to + # interpolation falling off the edge + # hence, we normalise it + this_p_mr_z = self.model.get_pmr_gz(self.AppBins,z) + this_p_mr_z /= np.sum(this_p_mr_z) + p_mr_z[:,i] = this_p_mr_z + self.p_mr_z = p_mr_z + + # records that this has been initialised + self.ZMAP = True + + ############################################################# + ################## Path Calculations ################# + ############################################################# + + + def init_path_raw_prior_Oi(self,DM,grid): + """ + Initialise the apparent magnitude prior for a single FRB DM. + + Computes p(m_r | DM_EG) by convolving the precomputed p(m_r | z) + grid (``self.p_mr_z``) with the zdm posterior p(z | DM_EG) extracted + from ``grid``. The result is stored internally so that + ``path_raw_prior_Oi`` can be called repeatedly for different host + galaxy candidates belonging to the same FRB without recomputing the + DM integral. + + Also computes the probability that the host is undetected: + - ``self.priors``: p(m_r | DM) weighted by the detection probability + p(detected | m_r). + - ``self.PUdist``: the magnitude-resolved contribution to P_U. + - ``self.PU``: scalar total prior probability that the host is unseen, + returned by ``estimate_unseen_prior()``. + + After this call, ``pathpriors.USR_raw_prior_Oi`` is automatically + pointed at ``self.path_raw_prior_Oi``. + + Args: + DM (float): extragalactic dispersion measure of the FRB (pc cm⁻³). + grid (Grid): initialised zdm grid object providing p(z, DM). 
+ """ + + # we start by getting the posterior distribution p(z) + # for an FRB with DM DM seen by the 'grid' + pz = get_pz_prior(grid,DM) + + # checks that pz is normalised + pz /= np.sum(pz) + + priors = np.sum(self.p_mr_z * pz,axis=1) # sums over z + + # stores knowledge of the DM used to calculate the priors + self.prior_DM = DM + self.raw_priors = priors + + pU = pUgm(self.AppMags,self.pU_mean,self.pU_width) + + self.priors = self.raw_priors * (1.-pU) + self.PUdist = self.raw_priors * pU + self.PU = np.sum(self.PUdist) + + # sets the PATH user function to point to its own + pathpriors.USR_raw_prior_Oi = self.path_raw_prior_Oi + + #return priors + + def get_posterior(self, grid, DM): + """ + Return apparent magnitude and redshift posteriors for a given DM. + + Computes p(z | DM) from the grid and convolves it with the + precomputed ``self.maghist`` to obtain p(m_r | DM). + + Note: from PATH's perspective this is a prior on host magnitude, + but from zdm's perspective it is a posterior on redshift given DM. + + This method predates ``init_path_raw_prior_Oi`` and may not be + actively used in current scripts. + + Args: + grid (Grid): initialised zdm grid object providing p(z, DM). + DM (float or np.ndarray): FRB extragalactic DM(s) in pc cm⁻³. + + Returns: + papps (np.ndarray): probability distribution of apparent magnitude + given DM, p(m_r | DM). + pz (np.ndarray): probability distribution of redshift given DM, + p(z | DM). 
+ """ + # Step 1: get prior on z + pz = get_pz_prior(grid,DM) + + ### STEP 2: get apparent magnitude distribution ### + if hasattr(DM,"__len__"): + papps = np.dot(self.maghist,pz) else: - raise ValueError("This weighting scheme not yet implemented") - return weights - + papps = self.maghist*pz + + + return papps,pz - ############################################################# - ################## Path Calculations ################# - ############################################################# - def estimate_unseen_prior(self,mag_limit): + def estimate_unseen_prior(self): """ Calculates PU, the prior that an FRB host galaxy of a particular DM is unseen in the optical image @@ -331,11 +1109,77 @@ def estimate_unseen_prior(self,mag_limit): """ - invisible = np.where(self.AppMags > mag_limit)[0] + # smooth cutoff + #pU_g_mr = pogm(self.AppMags,mean,width) + + # simple hard cutoff - now redundant + #invisible = np.where(self.AppMags > mag_limit)[0] + + #PU = np.sum(pU_g_mr * self.priors) + + #PU = np.sum(self.priors[invisible]) + + # we now pre-calculate this at the init raw path prior stage + #PU = np.sum(self.PU) + + return self.PU + + def path_base_prior(self,mags): + """ + Evaluate the apparent magnitude prior p(m_r | DM) at a list of magnitudes. + + Linearly interpolates ``self.priors`` (which already incorporates the + detection probability p(detected | m_r)) at each requested magnitude, + converting from the internally normalised sum-to-unity convention to a + probability density by dividing by the bin width ``self.dAppmag``. + + Unlike ``path_raw_prior_Oi``, this method does NOT divide by the + galaxy surface density Sigma_m, so it returns the raw magnitude prior + without the PATH normalisation factor. + + Args: + mags (list or tuple of float): apparent r-band magnitudes of + candidate host galaxies at which to evaluate the prior. + + Returns: + Ois (np.ndarray): prior probability density p(m_r | DM) evaluated + at each magnitude in ``mags``. 
+ """ + ngals = len(mags) + Ois = [] + for i,mag in enumerate(mags): + + #print(mag) + # calculate the bins in apparent magnitude prior + kmag2 = (mag - self.Appmin)/self.dAppmag + imag1 = int(np.floor(kmag2)) + imag2 = imag1 + 1 + kmag2 -= imag1 #residual; float + kmag1 = 1.-kmag2 + + # careful with interpolation - priors are for magnitude bins + # with bin edges give by Appmin + N dAppmag. + # We probably want to smooth this eventually due to minor + # numerical tweaks + + #kmag2 -= imag1 + #kmag1 = 1.-kmag2 + #imag2 = imag1+1 + #prior = kmag1*self.priors[imag1] + kmag2*self.priors[imag2] + + # simple linear interpolation + Oi = self.priors[imag1] * kmag1 + self.priors[imag2] * kmag2 + + # correct normalisation - otherwise, priors are defined to sum + # such that \sum priors = 1; here, we need \int priors dm = 1 + Oi /= self.dAppmag + + Ois.append(Oi) + + Ois = np.array(Ois) + return Ois - PU = np.sum(self.priors[invisible]) - return PU def path_raw_prior_Oi(self,mags,ang_sizes,Sigma_ms): """ @@ -372,6 +1216,9 @@ def path_raw_prior_Oi(self,mags,ang_sizes,Sigma_ms): # calculate the bins in apparent magnitude prior kmag2 = (mag - self.Appmin)/self.dAppmag imag1 = int(np.floor(kmag2)) + imag2 = imag1 + 1 + kmag2 -= imag1 #residual; float + kmag1 = 1.-kmag2 # careful with interpolation - priors are for magnitude bins # with bin edges give by Appmin + N dAppmag. 
@@ -383,106 +1230,38 @@ def path_raw_prior_Oi(self,mags,ang_sizes,Sigma_ms): #imag2 = imag1+1 #prior = kmag1*self.priors[imag1] + kmag2*self.priors[imag2] - # very simple - just gives probability for bin it's in - Oi = self.priors[imag1] - Oi /= Sigma_ms[i] # normalise by host counts + # simple linear interpolation + Oi = self.priors[imag1] * kmag1 + self.priors[imag2] * kmag2 + + # correct normalisation - otherwise, priors are defined to sum + # such that \sum priors = 1; here, we need \int priors dm = 1 + Oi /= self.dAppmag + + # modify sigma_ms by P(m|O) + pogm = (1.-pUgm(mag,self.pU_mean,self.pU_width)) + numerator = Sigma_ms[i] * pogm + Oi /= numerator # normalise by host counts + Ois.append(Oi) Ois = np.array(Ois) return Ois - def init_path_raw_prior_Oi(self,DM,grid): - """ - Initialises the priors for a particlar DM. - This performs a function very similar to - "get_posterior" except that it expicitly - only operates on a single DM, and saves the - information internally so that - path_raw_prior_Oi can be called for numerous - host galaxy candidates. - - It returns the priors distribution. - - Args: - DM [float]: dispersion measure of an FRB (pc cm-3) - grid (class grid): initialised grid object from which - to calculate priors - - Returns: - priors (float): vector of priors on host galaxy apparent magnitude - """ - - # we start by getting the posterior distribution p(z) - # for an FRB with DM DM seen by the 'grid' - pz = get_pz_prior(grid,DM) - - # we now calculate the list of priors - for the array - # defined by self.AppBins with bin centres at self.AppMags - priors = self.calc_magnitude_priors(grid.zvals,pz) - - # stores knowledge of the DM used to calculate the priors - self.prior_DM = DM - self.priors = priors - - return priors - - - def calc_magnitude_priors(self,zlist:np.ndarray,pzlist:np.ndarray): - """ - Calculates priors as a function of magnitude for - a given redshift distribution. 
- - Args: - zlist (np.ndarray, float): array of redshifts - pz (np.ndarray, float): array of probabilities of the FRB - occurring at each of those redshifts - - # returns probability-weighted magnitude distribution, as a function of - # self.AppBins - - """ - # we integrate over the host absolute magnitude distribution - - # checks that pz is normalised - pzlist /= np.sum(pzlist) - - for i,absmag in enumerate(self.AbsMags): - plum = self.AbsMagWeights[i] - mags = self.CalcApparentMags(absmag,zlist) - temp,bins = np.histogram(mags,weights=pzlist*plum,bins=self.AppBins) - if i==0: - pmags = temp - else: - pmags += temp - - return pmags - - def get_posterior(self, grid, DM): + + + +################# Useful functions not associated with a class ######### + + +def load_marnoch_data(): """ - Returns posterior redshift distributiuon for a given grid, and DM - magnitude distribution, for FRBs of DM given a grid object. - Note: this calculates a prior for PATH, but is a posterior - from zDM's point of view. 
- - Args: - grid (class grid object): grid object defining p(z,DM) - DM (float, np.ndarray OR scalar): FRB DM(s) - - Returns: - papps (np.ndarray, floats): probability distribution of apparent magnitudes given DM - pz (np.ndarray, floats): probability distribution of redshift given DM + Loads the Marnoch et al data on r-band magnitudes from FRB hosts """ - # Step 1: get prior on z - pz = get_pz_prior(grid,DM) - - ### STEP 2: get apparent magnitude distribution ### - if hasattr(DM,"__len__"): - papps = np.dot(self.maghist,pz) - else: - papps = self.maghist*pz - - - return papps,pz + from astropy.table import Table + datafile="magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv" + infile = os.path.join(resources.files('zdm'), 'data', 'optical', datafile) + table = Table.read(infile, format='ascii.ecsv') + return table def get_pz_prior(grid, DM): """ @@ -516,22 +1295,123 @@ def get_pz_prior(grid, DM): pz = pz/np.sum(pz,axis=0) return pz -def SimpleApparentMags(Abs,zs): + +def SimplekApparentMags(Abs,k,zs): + """ + Convert absolute to apparent magnitudes with a power-law k-correction. + + Applies the distance modulus plus a k-correction of the form + ``2.5 * k * log10(1 + z)``. + + Args: + Abs (float or np.ndarray): absolute magnitude(s) M_r. + k (float): k-correction power-law index. ``k=0`` reduces to a + pure distance modulus (identical to ``SimpleApparentMags``). + zs (float or np.ndarray): redshift(s) of the galaxies. + + Returns: + ApparentMags: apparent magnitude(s). Scalar if both inputs are + scalar; 1-D array if one is scalar and one is an array; 2-D + array of shape (NAbs, Nz) if both are arrays (computed via + ``np.outer``). """ - Function to convert galaxy absolue to apparent magnitudes. - Nominally, magnitudes are r-band magnitudes, but this function - is so simple it doesn't matter. 
+ # calculates luminosity distances (Mpc) + lds = cos.dl(zs) + + # finds distance relative to absolute magnitude distance + dabs = 1e-5 # in units of Mpc + + # k-corrections + kcorrs = (1+zs)**k - Just applies a distance correction - no k-correction. + dk = 2.5*np.log10(kcorrs) #i.e., 2.5*k*np.log10(1+z) + # relative magnitude + dMag = 2.5*np.log10((lds/dabs)**(2)) + dk + + + if np.isscalar(zs) or np.isscalar(Abs): + # just return the product, be it scalar x scalar, + # scalar x array, or array x scalar + # this also ensures that the dimensions are as expected + + ApparentMags = Abs + dMag + else: + # Convert to multiplication so we can use + # numpy.outer + temp1 = 10**Abs + temp2 = 10**dMag + ApparentMags = np.outer(temp1,temp2) + ApparentMags = np.log10(ApparentMags) + return ApparentMags + +def SimplekAbsoluteMags(App,k,zs): + """ + Convert apparent to absolute magnitudes with a power-law k-correction. + + Inverse of ``SimplekApparentMags``: subtracts the distance modulus and + k-correction ``2.5 * k * log10(1 + z)`` from the apparent magnitude. + Args: - Abs (float or array of floats): intrinsic galaxy luminosities - zs (float or array of floats): redshifts of galaxies + App (float or np.ndarray): apparent magnitude(s) m_r. + k (float): k-correction power-law index. ``k=0`` reduces to a + pure distance modulus (identical to ``SimpleAbsoluteMags``). + zs (float or np.ndarray): redshift(s) of the galaxies. + + Returns: + AbsoluteMags: absolute magnitude(s). Scalar if both inputs are + scalar; 1-D array if one is scalar and one is an array; 2-D + array of shape (NApp, Nz) if both are arrays (computed via + ``np.outer``). 
+    """
+    # calculates luminosity distances (Mpc)
+    lds = cos.dl(zs)
+
+    # finds distance relative to absolute magnitude distance
+    dabs = 1e-5 # in units of Mpc
+
+    # k-corrections
+    kcorrs = (1+zs)**k
+
+    dk = 2.5*np.log10(kcorrs)
+
+    # relative magnitude
+    dMag = 2.5*np.log10((lds/dabs)**(2)) + dk
+
+
+    if np.isscalar(zs) or np.isscalar(App):
+        # just return the product, be it scalar x scalar,
+        # scalar x array, or array x scalar
+        # this also ensures that the dimensions are as expected
+
+        AbsoluteMags = App - dMag
+    else:
+        # Convert to multiplication so we can use
+        # numpy.outer
+        temp1 = 10**App
+        temp2 = 10**-dMag
+        AbsoluteMags = np.outer(temp1,temp2)
+        AbsoluteMags = np.log10(AbsoluteMags)
+    return AbsoluteMags
+
+def SimpleAbsoluteMags(App,zs):
+    """
+    Convert apparent to absolute magnitudes using the distance modulus only.
+
+    Subtracts ``5 * log10(D_L / 10 pc)`` from the apparent magnitude, where
+    D_L is the luminosity distance in Mpc. No k-correction is applied.
+
+    Args:
+        App (float or np.ndarray): apparent magnitude(s) m_r.
+        zs (float or np.ndarray): redshift(s) of the galaxies.
+
     Returns:
-        ApparentMags: NAbs x NZ array of magnitudes, where these
-            are the dimensions of the inputs
+        AbsoluteMags: absolute magnitude(s). Scalar if both inputs are
+            scalar; 1-D array if one input is scalar and one is an array;
+            2-D array of shape (NApp, Nz) if both are arrays (computed via
+            ``np.outer``).
""" # calculates luminosity distances (Mpc) @@ -541,8 +1421,48 @@ def SimpleApparentMags(Abs,zs): dabs = 1e-5 # in units of Mpc # relative magnitude - dMag = 2.5*np.log10((lds/dabs)**2) + dMag = 2.5*np.log10((lds/dabs)**(2)) + + + if np.isscalar(zs) or np.isscalar(Abs): + # just return the product, be it scalar x scalar, + # scalar x array, or array x scalar + # this also ensures that the dimensions are as expected + + AbsoluteMags = App - dMag + else: + # Convert to multiplication so we can use + # numpy.outer + temp1 = 10**App + temp2 = 10**-dMag + AbsoluteMags = np.outer(temp1,temp2) + AbsoluteMags = np.log10(ApparentMags) + return AbsoluteMags + +def SimpleApparentMags(Abs,zs): + """ + Convert absolute to apparent magnitudes using the distance modulus only. + + Adds ``5 * log10(D_L / 10 pc)`` to the absolute magnitude, where + D_L is the luminosity distance in Mpc. No k-correction is applied. + + Args: + Abs (float or np.ndarray): absolute magnitude(s) M_r. + zs (float or np.ndarray): redshift(s) of the galaxies. + + Returns: + ApparentMags: apparent magnitude(s). Scalar if both inputs are + scalar; 1-D array if one input is scalar and one is an array; + 2-D array of shape (NAbs, Nz) if both are arrays (computed via + ``np.outer``). + """ + # calculates luminosity distances (Mpc) + lds = cos.dl(zs) + lds_pc = lds*1e6 + + # relative magnitude + dMag = 5*np.log10(lds_pc) - 5 if np.isscalar(zs) or np.isscalar(Abs): # just return the product, be it scalar x scalar, @@ -562,15 +1482,23 @@ def SimpleApparentMags(Abs,zs): def p_unseen_Marnoch(zvals,plot=False): """ - Returns probability of a hist being unseen in typical VLT - observations. - - Inputs: - zvals [float, array]: array of redshifts - + Return the probability that an FRB host galaxy is unseen in typical VLT observations. + + Digitises Figure 3 of Marnoch et al. 
2023 (MNRAS 525, 994), which shows + p(U | z) — the cumulative probability that a host galaxy at redshift z + falls below the VLT/FORS2 R-band detection limit. A cubic polynomial is + fit to the digitised curve and evaluated at the requested redshifts. + Values are clamped to [0, 1]. + + Args: + zvals (float or np.ndarray): redshift(s) at which to evaluate p(U | z). + plot (bool): if True, save a diagnostic comparison plot of the raw + digitised data, linear interpolation, and polynomial fit to + ``p_unseen.pdf``. Defaults to False. + Returns: - fitv [float, array]: p(Unseen) for redshift zvals - + fitv (np.ndarray): p(U | z) evaluated at each element of ``zvals``, + clamped to the range [0, 1]. """ # approx digitisation of Figure 3 p(U|z) # from Marnoch et al. @@ -615,7 +1543,20 @@ def p_unseen_Marnoch(zvals,plot=False): def simplify_name(TNSname): """ - Simplifies an FRB name to basics + Reduce a TNS FRB name to a six-character YYMMDD[L] identifier. + + Strips the leading ``FRB`` prefix (if present) and the year's + century digits, retaining only the six-digit date plus any + trailing letter suffix, to allow case-insensitive matching + between survey entries and external FRB catalogues. + + Args: + TNSname (str): FRB name in TNS format, e.g. ``'FRB20180924B'`` + or ``'20180924B'``. + + Returns: + name (str): simplified six-character identifier, e.g. ``'180924B'`` + (six digits plus optional letter). """ # reduces all FRBs to six integers @@ -636,10 +1577,21 @@ def simplify_name(TNSname): def matchFRB(TNSname,survey): """ - Gets the FRB id from the survey list - Returns None if not in the survey - Used to match properties between a survey - and other FRB libraries + Find the index of an FRB in a survey by TNS name. + + Uses ``simplify_name`` to normalise both the query name and the survey + entries, so minor formatting differences (century digits, trailing + letters) do not prevent a match. + + Args: + TNSname (str): TNS name of the FRB to look up, e.g. 
+ ``'FRB20180924B'``. + survey (Survey): loaded survey object whose ``frbs["TNS"]`` column + contains TNS names of detected FRBs. + + Returns: + int or None: index into ``survey.frbs`` of the first matching FRB, + or ``None`` if the FRB is not found in the survey. """ name = simplify_name(TNSname) @@ -651,113 +1603,26 @@ def matchFRB(TNSname,survey): return match -# this defines the ICS FRBs for which we have PATH info -frblist=['FRB20180924B','FRB20181112A','FRB20190102C','FRB20190608B', - 'FRB20190611B','FRB20190711A','FRB20190714A','FRB20191001A', - 'FRB20191228A','FRB20200430A','FRB20200906A','FRB20210117A', - 'FRB20210320C','FRB20210807D','FRB20211127I','FRB20211203C', - 'FRB20211212A','FRB20220105A','FRB20220501C', - 'FRB20220610A','FRB20220725A','FRB20220918A', - 'FRB20221106A','FRB20230526A','FRB20230708A', - 'FRB20230731A','FRB20230902A','FRB20231226A','FRB20240201A', - 'FRB20240210A','FRB20240304A','FRB20240310A'] - - - -def run_path(name,model,PU=0.1,usemodel = False, sort = False): - """ - evaluates PATH on an FRB - - absolute [bool]: if True, treats rel_error as an absolute value - in arcseconds +def plot_frb(name,ralist,declist,plist,opfile): """ - from frb.frb import FRB - from astropath.priors import load_std_priors - from astropath.path import PATH - - ######### Loads FRB, and modifes properties ######### - my_frb = FRB.by_name(name) - - # do we even still need this? 
I guess not, but will keep it here just in case - my_frb.set_ee(my_frb.sig_a,my_frb.sig_b,my_frb.eellipse['theta'], - my_frb.eellipse['cl'],True) - - # reads in galaxy info - ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH') - pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv') - ptbl = pandas.read_csv(pfile) - - # Load prior - priors = load_std_priors() - prior = priors['adopted'] # Default - - theta_new = dict(method='exp', - max=priors['adopted']['theta']['max'], - scale=0.5) - prior['theta'] = theta_new - - # change this to something depending on the FRB DM - prior['U']=PU - - candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']] - - this_path = PATH() - this_path.init_candidates(candidates.ra.values, - candidates.dec.values, - candidates.ang_size.values, - mag=candidates.mag.values) - this_path.frb = my_frb - - frb_eellipse = dict(a=my_frb.sig_a, - b=my_frb.sig_b, - theta=my_frb.eellipse['theta']) - - this_path.init_localization('eellipse', - center_coord=this_path.frb.coord, - eellipse=frb_eellipse) - - # this results in a prior which is uniform in log space - # when summed over all galaxies with the same magnitude - if usemodel: - this_path.init_cand_prior('user', P_U=prior['U']) - else: - this_path.init_cand_prior('inverse', P_U=prior['U']) - - # this is for the offset - this_path.init_theta_prior(prior['theta']['method'], - prior['theta']['max'], - prior['theta']['scale']) - - P_O=this_path.calc_priors() - - # Calculate p(O_i|x) - debug = True - P_Ox,P_U = this_path.calc_posteriors('fixed', - box_hwidth=10., - max_radius=10., - debug=debug) - mags = candidates['mag'] - - if sort: - indices = np.argsort(P_Ox) - P_O = P_O[indices] - P_Ox = P_Ox[indices] - mags = mags[indices] - - return P_O,P_Ox,P_U,mags - - + Plot an FRB localisation and its PATH host galaxy candidates. 
+ Produces a scatter plot showing the FRB position and a set of + deviated/sampled positions colour-coded by their PATH posterior + P(O|x), overlaid with circles representing candidate host galaxies + scaled by their angular size. All coordinates are shown in arcseconds + relative to the FRB position. -def plot_frb(name,ralist,declist,plist,opfile): - """ - does an frb - - absolute [bool]: if True, treats rel_error as an absolute value - in arcseconds - - clist: list of astropy coordinates - plist: list of p(O|x) for candidates hosts + Args: + name (str): TNS FRB name (e.g. ``'FRB20180924B'``), used to load + the FRB object and the corresponding PATH candidate table. + ralist (np.ndarray): right ascension values (degrees) of deviated + FRB positions to plot, colour-coded by ``plist``. + declist (np.ndarray): declination values (degrees) of deviated + FRB positions. + plist (np.ndarray): PATH posterior values P(O|x) for the deviated + positions, used to set the colour scale. + opfile (str): output file path for the saved figure. """ from frb.frb import FRB @@ -810,15 +1675,35 @@ def plot_frb(name,ralist,declist,plist,opfile): plt.tight_layout() plt.close() +def pUgm(mag,mean,width): + """ + Return the probability that a galaxy is undetected as a function of magnitude. + Models the survey detection completeness as a logistic function that + transitions from ~0 (bright, always detected) to ~1 (faint, never + detected) with a smooth rolloff centred on ``mean``: + p(U | m) = 1 / (1 + exp((mean - m) / width)) -def load_marnoch_data(): - """ - Loads the Marnoch et al data on r-band magnitudes from FRB hosts + Args: + mag (float or np.ndarray): r-band apparent magnitude(s) at which to + evaluate the detection-failure probability. + mean (float): magnitude at which p(U | m) = 0.5 (the 50% completeness + limit of the survey). + width (float): characteristic width of the completeness rolloff in + magnitudes. Smaller values give a sharper transition. 
+ + Returns: + pU (float or np.ndarray): probability of non-detection at each + magnitude in ``mag``, in the range [0, 1]. """ - from astropy.table import Table - datafile="magnitudes_and_probabilities_vlt-fors2_R-SPECIAL.ecsv" - infile = os.path.join(resources.files('zdm'), 'data', 'optical', datafile) - table = Table.read(infile, format='ascii.ecsv') - return table + + # converts to a number relative to the mean. Will be weird for mags < 0. + diff = (mean-mag)/width + + # converts the diff to a power of 10 + pU = 1./(1+np.exp(diff)) + + return pU + + diff --git a/zdm/optical_numerics.py b/zdm/optical_numerics.py new file mode 100644 index 00000000..688aa179 --- /dev/null +++ b/zdm/optical_numerics.py @@ -0,0 +1,884 @@ +""" +Numerical routines for evaluating and optimising FRB host galaxy magnitude models. + +This module is the numerical workhorse for the PATH integration in zdm, +analogous to ``iteration.py`` for the zdm grid. It provides: + +- **``function``** objective function passed to ``scipy.optimize.minimize`` + that evaluates a goodness-of-fit statistic for a given set of host model + parameters against the CRAFT ICS optical data. + +- **``calc_path_priors``** inner loop that runs PATH on a list of FRBs + across one or more surveys/grids, collecting priors, posteriors, and + undetected-host probabilities for each FRB. + +- **``run_path``** runs the PATH algorithm for a single named FRB, + loading its candidate host galaxies from the ``frb`` package data + and applying colour corrections to convert to r-band. + +- **``calculate_likelihood_statistic``** and **``calculate_ks_statistic``** + — goodness-of-fit statistics comparing the model apparent magnitude prior + to the observed PATH posteriors across all FRBs. + +- **``make_cumulative_plots``** plotting routine for visualising + cumulative magnitude distributions for one or more models simultaneously. 
+ +- **``make_wrappers``**, **``make_cdf``**, **``flatten``**, + **``get_cand_properties``** supporting utilities. +""" + +import os +from importlib import resources +import numpy as np +from matplotlib import pyplot as plt +import pandas + +from zdm import optical as op + +from frb.frb import FRB +from astropath.priors import load_std_priors +from astropath.path import PATH +from frb.associate import frbassociate + +def function(x,args): + """ + Objective function for ``scipy.optimize.minimize`` over host model parameters. + + Updates the host magnitude model with parameter vector ``x``, runs PATH + on all FRBs, computes the chosen goodness-of-fit statistic, and returns + a scalar value suitable for minimisation (i.e. smaller is better). + + Parameters + ---------- + x : np.ndarray + Parameter vector passed to ``model.init_args(x)``. Its meaning + depends on the model (e.g. absolute magnitude bin weights for + ``simple_host_model``, or ``fSFR`` for ``loudas_model``). + args : list + Packed argument tuple with the following elements, in order: + + - ``frblist`` (list of str): TNS names of FRBs to evaluate. + - ``ss`` (list of Survey): surveys in which the FRBs may appear. + - ``gs`` (list of Grid): zdm grids corresponding to those surveys. + - ``model``: host magnitude model instance (must implement + ``init_args``). + - ``POxcut`` (float or None): if not None, restrict the statistic + to FRBs whose best host candidate has P(O|x) > POxcut. + - ``istat`` (int): statistic to use — 0 for KS-like statistic, + 1 for maximum-likelihood (returned as negative log-likelihood + so that minimisation maximises the likelihood). + + Returns + ------- + stat : float + Goodness-of-fit statistic (smaller is better). For ``istat=1`` + this is the negative log-likelihood. 
+ """ + + frblist = args[0] + ss = args[1] + gs=args[2] + model=args[3] + POxcut=args[4] # either None, or a cut such as 0.9 + istat=args[5] + + # initialises model to the priors + # generates one per grid, due to possible different zvals + model.init_args(x) + wrappers = make_wrappers(model,gs) + + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + # we re-normalise the sum of PUs by NFRB + + # prevents infinite plots being created + if istat==0: + stat = calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors, + sumPUobs,sumPUprior,plotfile=None,POxcut=POxcut) + elif istat==1: + stat = calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors, + PUobs,PUprior,plotfile=None,POxcut=POxcut) + # need to construct stat so that small values are good! Log-likelihood being good means large! + stat *= -1 + + return stat + +def make_wrappers(model,grids): + """ + returns a list of model wrapper objects for given model and grids + + Args: + model: one of the optical model class objects + grids: list of grid class objects + + Returns: + wrappers: list of wrappers around model, one for each grid + """ + wrappers = [] + for i,g in enumerate(grids): + wrappers.append(op.model_wrapper(model,g.zvals)) + return wrappers + + +def make_cdf(xs,ys,ws,norm=True): + """ + Build a weighted empirical CDF evaluated on a fixed grid. + + For each grid point ``x`` in ``xs``, accumulates the weights ``ws[i]`` + of all observations ``ys[i]`` that fall below ``x``. The result is a + non-decreasing array that can be compared to a model prior CDF. + + Parameters + ---------- + xs : np.ndarray + Grid of x values at which to evaluate the CDF (e.g. apparent + magnitude bin centres). Must be sorted in ascending order. + ys : array-like + Observed data values (e.g. host galaxy apparent magnitudes). 
+ ws : array-like + Weight for each observation in ``ys`` (e.g. PATH posteriors P_Ox). + Must have the same length as ``ys``. + norm : bool, optional + If True (default), normalise the CDF so that its maximum value + is 1. Set to False to preserve the raw cumulative weight sum. + + Returns + ------- + cdf : np.ndarray, shape (len(xs),) + Weighted empirical CDF evaluated at each point in ``xs``. + """ + cdf = np.zeros([xs.size]) + for i,y in enumerate(ys): + OK = np.where(xs > y)[0] + cdf[OK] += ws[i] + if norm: + cdf /= cdf[-1] + return cdf + + +def calc_path_priors(frblist,ss,gs,wrappers,verbose=True,usemodel=True,P_U=0.1): + """ + Run PATH on a list of FRBs and return priors, posteriors, and P_U values. + + For each FRB in ``frblist``, searches all surveys in ``ss`` for a match, + computes the zdm-derived apparent magnitude prior (if ``usemodel=True``), + and runs PATH to produce host association posteriors. Results for all FRBs + are collected into parallel lists (one entry per FRB). + + Also writes a CSV file ``allgalaxies.csv`` (if it does not already exist) + containing the magnitude and VLT/FORS2 R-band columns for all candidate + host galaxies across all FRBs. + + Parameters + ---------- + frblist : list of str + TNS names of FRBs to process (e.g. ``['FRB20180924B', ...]``). + ss : list of Survey + Survey objects to search for each FRB. The first survey containing + a given FRB is used. + gs : list of Grid + zdm grids corresponding to each survey in ``ss``. + wrappers : list of model_wrapper + One ``model_wrapper`` per grid (from ``make_wrappers``), used to + compute DM-dependent apparent magnitude priors. + verbose : bool, optional + If True, print a warning for each FRB not found in any survey. + Defaults to True. + usemodel : bool, optional + If True, use the zdm-derived magnitude prior from ``wrappers`` and + estimate P_U from the model. If False, use PATH's built-in inverse + prior and the supplied fixed ``P_U``. Defaults to True. 
+ P_U : float, optional + Fixed prior probability that the host galaxy is undetected. Only + used when ``usemodel=False``. Defaults to 0.1. + + Returns + ------- + nfitted : int + Number of FRBs successfully matched to a survey and processed. + AppMags : np.ndarray + Internal apparent magnitude grid (from the last processed wrapper). + allMagPriors : list of np.ndarray + One array per FRB giving p(m_r | DM_EG) on the ``AppMags`` grid. + Entries are ``None`` when ``usemodel=False``. + allObsMags : list of np.ndarray + One array per FRB listing the r-band magnitudes of PATH candidate + host galaxies. + allPO : list of np.ndarray + One array per FRB giving the PATH prior P_O for each candidate. + allPOx : list of np.ndarray + One array per FRB giving the PATH posterior P(O|x) for each candidate. + allPU : list of float + Prior P_U (probability of unseen host) for each FRB. + allPUx : list of float + Posterior P(U|x) (probability host is unseen, given data) for each FRB. + sumPU : float + Sum of ``allPU`` across all FRBs. + sumPUx : float + Sum of ``allPUx`` across all FRBs. + frbs : list of str + TNS names of the FRBs that were successfully matched and processed. + dms : list of float + Extragalactic DM (pc cm⁻³) for each FRB in ``frbs``. + """ + + NFRB = len(frblist) + + # old version creating 1D lists + #allObsMags = None + #allPOx = None + #allMagPriors = None + + # new version recording one list per FRB. For max likelihood functionality + allObsMags = [] + allPOx = [] + allPO = [] + allMagPriors = [] + + sumPU = 0. + sumPUx = 0. + allPU = [] + allPUx = [] + nfitted = 0 + + frbs=[] + dms=[] + + for i,frb in enumerate(frblist): + # iterates over the FRBs. 
"Do FRB" + # P_O is the prior for each galaxy + # P_Ox is the posterior + # P_Ux is the posterior for it being unobserved + # mags is the list of galaxy magnitudes + + # determines if this FRB was seen by the survey, and + # if so, what its DMEG is + for j,s in enumerate(ss): + imatch = op.matchFRB(frb,s) + if imatch is not None: + # this is the survey to be used + g=gs[j] + s = ss[j] + if usemodel: + wrapper = wrappers[j] + jmatch = j + frbs.append(frb) + break + + if imatch is None: + if verbose: + print("Could not find ",frb," in any survey") + continue + + nfitted += 1 + + if usemodel: + AppMags = wrapper.AppMags + else: + AppMags = None + + # record this info + DMEG = s.DMEGs[imatch] + dms.append(DMEG) + + if usemodel: + + # this is where the particular survey comes into it + # Must be priors on magnitudes for this FRB + wrapper.init_path_raw_prior_Oi(DMEG,g) + + # extracts priors as function of absolute magnitude for this grid and DMEG + MagPriors = wrapper.priors + else: + MagPriors = None + + # defunct now + #mag_limit=26 # might not be correct. TODO! Should be in FRB object + + # calculates unseen prior + if usemodel: + P_U = wrapper.estimate_unseen_prior() + #MagPriors[:] = 1./len(MagPriors) # log-uniform priors when no model used + + + P_O,P_Ox,P_Ux,ObsMags,ptbl = run_path(frb,usemodel=usemodel,P_U = P_U) + + + # replaces PO value with raw PO value, i.e. 
excluding the driver sigma + if usemodel: + P_O = wrapper.path_base_prior(ObsMags) + + # kept here for debugging + if False: + print("P_U is ",P_U) + print("P_O is ",P_O) + print("P_Ox is ",P_Ox) + plt.figure() + plt.plot(AppMags,MagPriors) + plt.show() + plt.close() + + if i==0: + allgals = ptbl + else: + allgals = pandas.concat([allgals,ptbl], ignore_index=True) + + ObsMags = np.array(ObsMags) + + # new version creating a list of lists + allObsMags.append(ObsMags) + allPOx.append(P_Ox) + allPO.append(P_O) + allMagPriors.append(MagPriors) + + sumPU += P_U + sumPUx += P_Ux + allPU.append(P_U) + allPUx.append(P_Ux) + + subset = allgals[['frb','mag','VLT_FORS2_R']].copy() + + # saves all galaxies + if not os.path.exists("allgalaxies.csv"): + subset.to_csv("allgalaxies.csv",index=False) + + return nfitted,AppMags,allMagPriors,allObsMags,allPO,allPOx,allPU,allPUx,sumPU,sumPUx,frbs,dms + + +def calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs, + PUprior,plotfile=None,POxcut=None): + """ + Compute the total log-likelihood of the observed PATH posteriors given the model prior. + + For each FRB, evaluates log10(Σ P(O_i|x) / rescale + P_U_prior), where the + rescale factor accounts for PATH's internal renormalisation of posteriors + relative to the model prior. Summing over all FRBs gives the total + log-likelihood returned to the caller. + + Parameters + ---------- + NFRB : int + Number of FRBs to sum over. + AppMags : np.ndarray, shape (NMAG,) + Apparent magnitude grid used to compute the model prior (not used + directly in this function, but kept for API consistency with + ``calculate_ks_statistic``). + AppMagPriors : list of np.ndarray, length NFRB + Model prior p(m_r | DM_EG) on the ``AppMags`` grid, one array per FRB. + ObsMags : list of np.ndarray, length NFRB + Observed r-band magnitudes of PATH candidate host galaxies, one array + per FRB (length NCAND varies by FRB). 
+ ObsPosteriors : list of np.ndarray, length NFRB + PATH posterior P(O_i|x) for each candidate, one array per FRB. + PUobs : list of float, length NFRB + PATH posterior P(U|x) — probability that the true host is undetected — + for each FRB, as returned by PATH after renormalisation. + PUprior : list of float, length NFRB + Model prior P_U for each FRB, as estimated by + ``wrapper.estimate_unseen_prior()``. + plotfile : str or None, optional + If provided, save a diagnostic plot comparing prior and posterior + magnitude distributions to this file path. Defaults to None. + POxcut : float or None, optional + If not None, restrict the statistic to FRBs whose maximum P(O|x) + exceeds this threshold (simulates requiring a confident host ID). + Defaults to None. + + Returns + ------- + stat : float + Total log10-likelihood summed over all NFRB FRBs. Larger values + indicate a better fit. Multiply by -1 for use as a minimisation + objective. + """ + # calculates log-likelihood of observation + stat=0 + + for i in np.arange(NFRB): + # sums the likelihoods over each galaxy: p(xi|oi)*p(oi)/Pfield + + # calculate the factor by which the p...|x probabilities have been rescaled. + # allows us to undo this effect + rescale = PUobs[i]/PUprior[i] + # the problem is that the posteriors have been rescaled by some factor + # we do not want this! Hence, we work out the rescale factor by comparing + # the rescale on the unseen prior. 
Then we undo this factor + # (Note: PUobs / rescale = PUprior, hence must divide) + sumpost = np.sum(ObsPosteriors[i])/rescale+PUprior[i] + + if False: + plt.figure() + plt.plot(AppMags,AppMagPriors[i]/np.max(AppMagPriors[i]),label="priors from model") + for j,mag in enumerate(ObsMags[i]): + plt.scatter(ObsMags[i],ObsPosteriors[i],label="posteriors") + + print("Sum gives ",sumpost, " of which PU is ",PUprior[i]) + plt.show() + plt.close() + ll = np.log10(sumpost) + stat += ll + + + return stat + +def flatten(xss): + """ + Flatten a list of lists into a single flat list. + """ + return [x for xs in xss for x in xs] + +def calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs, + PUprior,plotfile=None,POxcut=None,plotlabel=None,abc=None,tag=""): + """ + Compute a KS-like goodness-of-fit statistic between model prior and observed posteriors. + + Builds cumulative magnitude distributions for both the model prior and the + PATH posteriors, normalised by the number of FRBs, and returns the maximum + absolute difference between them — analogous to the KS test statistic. + + Optionally produces a plot comparing the two cumulative distributions. + + Parameters + ---------- + NFRB : int + Number of FRBs used for normalisation. + AppMags : np.ndarray, shape (NMAG,) + Apparent magnitude grid on which priors are defined. + AppMagPriors : list of np.ndarray, length NFRB + Model prior p(m_r | DM_EG) on ``AppMags``, one array per FRB. + ObsMags : list of np.ndarray, length NFRB + Observed r-band magnitudes of PATH candidate galaxies, one per FRB. + ObsPosteriors : list of np.ndarray, length NFRB + PATH posteriors P(O_i|x) for each candidate, one array per FRB. + PUobs : list of float + Posterior P(U|x) for each FRB (not used directly in the statistic, + kept for API consistency). + PUprior : list of float + Prior P_U for each FRB (not used directly, kept for API consistency). 
+ plotfile : str or None, optional + If provided, save a CDF comparison plot to this path. Defaults to None. + POxcut : float or None, optional + If not None, restrict to candidates with P(O|x) > POxcut and + normalise both CDFs to unity (simulates the approach of selecting + only confidently identified hosts). Defaults to None. + plotlabel : str or None, optional + Text label placed in the centre-bottom of the plot. Defaults to None. + abc : str or None, optional + Panel label (e.g. ``'(a)'``) placed in the upper-left corner of the + figure in figure-coordinate space. Defaults to None. + tag : str, optional + String prefix added to the legend labels ``"Observed"`` and + ``"Prior"``. Defaults to ``""``. + + Returns + ------- + stat : float + Maximum absolute difference between the observed and prior cumulative + distributions. Smaller values indicate a better fit. + """ + # sums the apparent mag priors over all FRBs to create a cumulative distribution + fAppMagPriors = np.zeros([len(AppMags)]) + + for i,amp in enumerate(AppMagPriors): + fAppMagPriors += amp + + + fObsPosteriors = np.array(flatten(ObsPosteriors)) + + fObsMags = np.array(flatten(ObsMags)) + + # we calculate a probability using a cumulative distribution + prior_dist = np.cumsum(fAppMagPriors) + + if POxcut is not None: + # cuts data to "good" FRBs only + OK = np.where(fObsPosteriors > POxcut)[0] + Ndata = len(OK) + fObsMags = fObsMags[OK] + fObsPosteriors = np.full([Ndata],1.) # effectively sets these to unity + + + # makes a cdf in units of AppMags, with observations ObsMags weighted by ObsPosteriors + obs_dist = make_cdf(AppMags,fObsMags,fObsPosteriors,norm=False) + + if POxcut is not None: + # current techniques just assume we have the full distribution + obs_dist /= obs_dist[-1] + prior_dist /= prior_dist[-1] + else: + # the above is normalised to NFRB. 
We now divide it by this + # might want to be careful here, and preserve this normalisation + obs_dist /= NFRB + prior_dist /= NFRB #((NFRB-PUprior)/NFRB) / prior_dist[-1] + + # we calculate something like the k-statistic. Includes NFRB normalisation + diff = obs_dist - prior_dist + stat = np.max(np.abs(diff)) + + if plotfile is not None: + plt.figure() + plt.xlabel("Apparent magnitude $m_r$") + plt.ylabel("Cumulative host galaxy distribution") + plt.ylim(0,1) + + # calcs lowest x that is essentially at max + ixmax = np.where(prior_dist > prior_dist[-1]*0.999)[0][0] + # rounds it up to multiple of 5 + xmax = 5 * (int(AppMags[ixmax]/5.)+1) + ixmin = np.where(prior_dist < 0.01)[0][-1] + xmin = 5*(int(AppMags[ixmin]/5.)) + plt.xlim(xmin,xmax) + + #cx,cy = make_cdf_for_plotting(ObsMags,weights=ObsPosteriors) + plt.plot(AppMags,obs_dist,label=tag+"Observed",color="black") + plt.plot(AppMags,prior_dist,label=tag+"Prior",linestyle=":") + plt.legend() + + # adds label to plot + if plotlabel is not None: + plt.text((xmin+xmax)/2.,0.05,plotlabel) + + if abc is not None: + plt.text(0.02,0.9,abc,fontsize=16, transform=plt.gcf().transFigure) + + plt.tight_layout() + plt.savefig(plotfile) + plt.close() + + + return stat + +def make_cumulative_plots(NMODELS,NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs, + PUprior,plotfile,plotlabel,POxcut=None,abc=None,onlyobs=None, + greyscale=[],addpriorlabel=True): + """ + Plot cumulative apparent magnitude distributions for multiple host models on one figure. + + Computes the same normalised prior and observed CDFs as + ``calculate_ks_statistic``, but for ``NMODELS`` models simultaneously, + overlaying them on a single figure with distinct line styles. + + All list-valued parameters that appear in ``calculate_ks_statistic`` + gain an additional leading dimension of size ``NMODELS`` here. + + Parameters + ---------- + NMODELS : int + Number of models to plot. 
+ NFRB : list of int, length NMODELS + Number of FRBs for each model, used for normalisation. + AppMags : list of np.ndarray, length NMODELS + Apparent magnitude grid for each model. + AppMagPriors : list of lists of np.ndarray, shape (NMODELS, NFRB, NMAG) + Model prior p(m_r | DM_EG) for each model and FRB. + ObsMags : list of lists of np.ndarray, shape (NMODELS, NFRB, NCAND) + Observed candidate magnitudes for each model and FRB. + ObsPosteriors : list of lists of np.ndarray, shape (NMODELS, NFRB, NCAND) + PATH posteriors P(O_i|x) for each model and FRB. + PUobs : list, length NMODELS + Posterior P(U|x) per model (not used directly in the plot). + PUprior : list, length NMODELS + Prior P_U per model (not used directly in the plot). + plotfile : str + Output file path for the saved figure. + plotlabel : list of str, length NMODELS + Legend label prefix for each model. + POxcut : float or None, optional + If not None, restrict to candidates with P(O|x) > POxcut and + normalise CDFs to unity. Defaults to None. + abc : str or None, optional + Panel label (e.g. ``'(a)'``) placed in the upper-left corner in + figure-coordinate space. Defaults to None. + onlyobs : int or None, optional + If not None, only draw the observed CDF for the model with this + index (useful when all models share the same observations). The + observed line is then labelled ``"Observed"`` without a model prefix. + Defaults to None. + greyscale : list of int, optional + Indices of models whose observed CDF should additionally be drawn + in grey (for background reference). Defaults to ``[]``. + addpriorlabel : bool, optional + If True (default), append ``": Prior"`` to each model's legend entry. + Set to False to use only ``plotlabel[imodel]`` as the label. 
+ + Returns + ------- + None + """ + + # arrays to hold created observed and prior distributions + prior_dists = [] + obs_dists = [] + linestyles=[":","--","-.","-"] + + # loops over models to create prior distributions + for imodel in np.arange(NMODELS): + # sums the apparent mag priors over all FRBs to create a cumulative distribution + fAppMagPriors = np.zeros([len(AppMags[imodel])]) + + for i,amp in enumerate(AppMagPriors[imodel]): + fAppMagPriors += amp + + fObsPosteriors = np.array(flatten(ObsPosteriors[imodel])) + + fObsMags = np.array(flatten(ObsMags[imodel])) + + # we calculate a probability using a cumulative distribution + prior_dist = np.cumsum(fAppMagPriors) + + if POxcut is not None: + # cuts data to "good" FRBs only + OK = np.where(fObsPosteriors > POxcut)[0] + Ndata = len(OK) + fObsMags = fObsMags[OK] + fObsPosteriors = np.full([Ndata],1.) # effectively sets these to unity + + + # makes a cdf in units of AppMags, with observations ObsMags weighted by ObsPosteriors + obs_dist = make_cdf(AppMags[imodel],fObsMags,fObsPosteriors,norm=False) + + if POxcut is not None: + # current techniques just assume we have the full distribution + obs_dist /= obs_dist[-1] + prior_dist /= prior_dist[-1] + else: + # the above is normalised to NFRB. We now divide it by this + # might want to be careful here, and preserve this normalisation + obs_dist /= NFRB[imodel] + prior_dist /= NFRB[imodel] #((NFRB-PUprior)/NFRB) / prior_dist[-1] + + # we calculate something like the k-statistic. Includes NFRB normalisation + diff = obs_dist - prior_dist + stat = np.max(np.abs(diff)) + + obs_dists.append(obs_dist) + prior_dists.append(prior_dist) + + # plotting! 
+ plt.figure() + plt.xlabel("Apparent magnitude $m_r$") + plt.ylabel("Cumulative host galaxy distribution") + plt.ylim(0,1) + + for imodel in np.arange(NMODELS): + + if onlyobs is None or onlyobs == imodel: + if onlyobs is not None: + color = 'black' + label = "Observed" # don't sub-label, since this stands in for all observed + else: + color=plt.gca().lines[-1].get_color() + label = plotlabel[imodel]+": Observed" + + plt.plot(AppMags[imodel],obs_dists[imodel],label=label, + color=color) + + # adds greyscale 'background' plots of observed distributions + if imodel in greyscale: + plt.plot(AppMags[imodel],obs_dists[imodel],color="gray") + # add these in greyscale, to highlight they are 'background' plots + # this option never used, but experimented with. + + + # calcs lowest x that is essentially at max + ixmax = np.where(prior_dist > prior_dist[-1]*0.999)[0][0] + # rounds it up to multiple of 5 + xmax = 5 * (int(AppMags[imodel][ixmax]/5.)+1) + ixmin = np.where(prior_dist < 0.001)[0][-1] + xmin = 5*(int(AppMags[imodel][ixmin]/5.)) + + # sets this for each one - yes, it's random which is which, oh well! + plt.xlim(xmin,xmax) + + #cx,cy = make_cdf_for_plotting(ObsMags,weights=ObsPosteriors) + if addpriorlabel: + label = plotlabel[imodel]+": Prior" + else: + label = plotlabel[imodel] + + plt.plot(AppMags[imodel],prior_dists[imodel],label=label, + linestyle=linestyles[imodel%4]) + + + + if abc is not None: + plt.text(0.02,0.9,abc,fontsize=16, transform=plt.gcf().transFigure) + plt.legend(fontsize=12,loc="upper left") + plt.tight_layout() + plt.savefig(plotfile) + plt.close() + + return None + +def get_cand_properties(frblist): + """ + Load PATH candidate host galaxy properties for a list of FRBs. + + Reads the pre-generated PATH CSV files from the ``frb`` package data + directory (``frb/data/Galaxies/PATH/<frb_name>_PATH.csv``) and extracts + the columns ``['ang_size', 'mag', 'ra', 'dec', 'separation']`` for + each FRB. + + Args: + frblist (list of str): TNS FRB names (e.g. 
``['FRB20180924B', ...]``). + + Returns: + all_candidates (list of pd.DataFrame): one DataFrame per FRB, + each with columns ``ang_size``, ``mag``, ``ra``, ``dec``, + and ``separation``. + """ + + all_candidates=[] + for i,name in enumerate(frblist): + + ######### Loads FRB, and modifies properties ######### + my_frb = FRB.by_name(name) + #this_path = frbassociate.FRBAssociate(my_frb, max_radius=10.) + + # reads in galaxy info + ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH') + pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv') + ptbl = pandas.read_csv(pfile) + candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']] + all_candidates.append(candidates) + return all_candidates + +def run_path(name,P_U=0.1,usemodel=False,sort=False): + """ + Run the PATH algorithm on a single FRB and return host association results. + + Loads the FRB object and its pre-generated PATH candidate table from the + ``frb`` package, applies colour corrections to convert candidate magnitudes + to r-band (using fixed offsets: I → R: +0.65, g → R: −0.65), sets up the + FRB localisation ellipse and offset prior, and evaluates PATH posteriors. + + The magnitude prior used for the candidates is: + + - ``usemodel=False``: PATH's built-in ``'inverse'`` prior (uniform in log + surface density). + - ``usemodel=True``: the ``'user'`` prior, which must be set externally by + pointing ``pathpriors.USR_raw_prior_Oi`` at a ``model_wrapper`` method + before calling this function (typically done by + ``wrapper.init_path_raw_prior_Oi``). + + The offset prior is always the ``'exp'`` model from PATH's ``'adopted'`` + standard priors, with scale 0.5 arcsec. + + Parameters + ---------- + name : str + TNS name of the FRB (e.g. ``'FRB20180924B'``). + P_U : float, optional + Prior probability that the true host galaxy is undetected. Defaults + to 0.1. + usemodel : bool, optional + If True, use the externally set user prior for candidate magnitudes. + Defaults to False. 
+ sort : bool, optional + If True, sort the returned arrays by P(O|x) in ascending order. + Defaults to False. + + Returns + ------- + P_O : np.ndarray + Prior probability P(O_i) for each candidate host galaxy. + P_Ox : np.ndarray + Posterior probability P(O_i|x) for each candidate. + P_Ux : float + Posterior probability P(U|x) that the true host is undetected. + mags : np.ndarray + R-band apparent magnitudes of the candidates (after colour correction). + ptbl : pd.DataFrame + Full PATH candidate table loaded from the CSV file, with an + additional ``'frb'`` column set to ``name``. + """ + + ######### Loads FRB, and modifies properties ######### + my_frb = FRB.by_name(name) + this_path = frbassociate.FRBAssociate(my_frb, max_radius=10.) + + # reads in galaxy info + ppath = os.path.join(resources.files('frb'), 'data', 'Galaxies', 'PATH') + pfile = os.path.join(ppath, f'{my_frb.frb_name}_PATH.csv') + ptbl = pandas.read_csv(pfile) + + ngal = len(ptbl) + ptbl["frb"] = np.full([ngal],name) + + # Load prior + priors = load_std_priors() + prior = priors['adopted'] # Default + + theta_new = dict(method='exp', + max=priors['adopted']['theta']['max'], + scale=0.5) + prior['theta'] = theta_new + + # change this to something depending on the FRB DM + prior['U']=P_U + + candidates = ptbl[['ang_size', 'mag', 'ra', 'dec', 'separation']] + + # implements a correction to their relative magnitudes. 
+ # note that order is R, then I, then g + if "VLT_FORS2_R" in ptbl: + mags = np.array(candidates.mag.values) + elif "VLT_FORS2_I" in ptbl: + mags = np.array(candidates.mag.values) + 0.65 + elif "VLT_FORS2_g" in ptbl: + mags = np.array(candidates.mag.values) - 0.65 + elif "GMOS_S_i" in ptbl: + mags = np.array(candidates.mag.values) + 0.65 + elif "LRIS_I" in ptbl: + mags = np.array(candidates.mag.values) + 0.65 + else: + raise ValueError("Cannot implement colour correction") + + + #this_path = PATH() + this_path.init_candidates(candidates.ra.values, + candidates.dec.values, + candidates.ang_size.values, + mag=mags) + this_path.frb = my_frb + + frb_eellipse = dict(a=np.abs(my_frb.sig_a), + b=np.abs(my_frb.sig_b), + theta=my_frb.eellipse['theta']) + + this_path.init_localization('eellipse', + center_coord=this_path.frb.coord, + eellipse=frb_eellipse) + + # this results in a prior which is uniform in log space + # when summed over all galaxies with the same magnitude + if usemodel: + this_path.init_cand_prior('user', P_U=prior['U']) + else: + this_path.init_cand_prior('inverse', P_U=prior['U']) + + # this is for the offset + this_path.init_theta_prior(prior['theta']['method'], + prior['theta']['max'], + prior['theta']['scale']) + + P_O=this_path.calc_priors() + + # Calculate p(O_i|x) + debug = True + P_Ox,P_Ux = this_path.calc_posteriors('fixed', + box_hwidth=10., + max_radius=10., + debug=debug) + + # mags already defined above + #mags = candidates['mag'] + + if sort: + indices = np.argsort(P_Ox) + P_O = P_O[indices] + P_Ox = P_Ox[indices] + mags = mags[indices] + + return P_O,P_Ox,P_Ux,mags,ptbl + diff --git a/zdm/optical_params.py b/zdm/optical_params.py index f1eef223..ace06943 100644 --- a/zdm/optical_params.py +++ b/zdm/optical_params.py @@ -1,21 +1,36 @@ -""" Classes for optical properties """ +""" +Classes for optical properties + +This philosophy here is to have a class of key parameters that relates to +a single class object contained within optical.py. 
The dataclasses are +used to set parameters that initialise their parent classes, which is +where all the complicated calculations are performed. +""" from dataclasses import dataclass, field from zdm import data_class import numpy as np + + +# Simple SFR model @dataclass -class Hosts(data_class.myDataClass): +class SimpleParams(data_class.myDataClass): + """ + Data class to hold the generic host galaxy class with no + pre-specified model + + """ # None of the fields should start with an X Absmin: float = field( - default=-30, + default=-25, metadata={'help': "Minimum host absolute magnitude", 'unit': 'M_r^{min}', 'Notation': '', }) Absmax: float = field( - default=0., + default=-15., metadata={'help': "Maximum host absolute magnitude", 'unit': 'M_r^{max}', 'Notation': '', @@ -32,6 +47,108 @@ class Hosts(data_class.myDataClass): 'unit': '', 'Notation': '', }) + AbsPriorMeth: int = field( + default=0, + metadata={'help': "Model for abs mag prior and function description. 0: uniform distribution. Others to be implemented.", + 'unit': '', + 'Notation': '', + }) + AppModelID: int = field( + default=0, + metadata={'help': "Model for converting absolute to apparent magnitudes. 0: no k-correction. 1: with k-correctionOthers to be implemented.", + 'unit': '', + 'Notation': '', + }) + AbsModelID: int = field( + default=1, + metadata={'help': "Model for describing absolute magnitudes. 0: Simple histogram of absolute magnitudes. 1: linear interpolation, 2: spline interpolation of histogram, 3: spline interpolation in log space", + 'unit': '', + 'Notation': '', + }) + k: float = field( + default=0., + metadata={'help': "k-correction", + 'unit': '', + 'Notation': 'k', + }) + + +# Nick Loudas's SFR model +@dataclass +class LoudasParams(data_class.myDataClass): + """ + Data class to hold the SFR model from Nick, which models + FRBs as some fraction of the star-formation rate. 
+ """ + fSFR: float = field( + default=0.5, + metadata={'help': "Fraction of FRBs associated with star-formation", + 'unit': '', + 'Notation': '', + }) + NzBins: int = field( + default=10, + metadata={'help': "Number of redshift bins over which the histograms are calculated", + 'unit': '', + 'Notation': '', + }) + zmin: float = field( + default=0., + metadata={'help': "Minimum redshift over which pmag is calculated", + 'unit': '', + 'Notation': '', + }) + zmax: float = field( + default=0., + metadata={'help': "Maximum redshift over which pmag is calculated", + 'unit': '', + 'Notation': '', + }) + NMrBins: int = field( + default=0., + metadata={'help': "Number of magnitude bins", + 'unit': '', + 'Notation': '', + }) + Mrmin: float = field( + default=0., + metadata={'help': "Minimum absolute magnitude over which pmag is calculated", + 'unit': '', + 'Notation': '', + }) + Mrmax: float = field( + default=0., + metadata={'help': "Maximum magnitude over which pmag is calculated", + 'unit': '', + 'Notation': '', + }) + + +@dataclass +class Identification(data_class.myDataClass): + """ + Data class for holding parameters related to identifying galaxies in an image. + These describe the mean and deviation for the function pUgm in optical.py + that parameterises p(U|mr) + """ + pU_mean: float = field( + default=26.176, + metadata={'help': "Magnitude at which pU|mr is 0.5", + 'unit': '', + 'Notation': '', + }) + pU_width: float = field( + default=0.342, + metadata={'help': "Width of pU|mr distribution in ln space", + 'unit': '', + 'Notation': '', + }) + +@dataclass +class Apparent(data_class.myDataClass): + """ + # parameters for apparent mags - used by wrapper + """ Appmin: float = field( default=10, metadata={'help': "Minimum host apparent magnitude", @@ -50,21 +167,26 @@ class Hosts(data_class.myDataClass): 'unit': '', 'Notation': '', }) - AbsPriorMeth: int = field( - default=0, - metadata={'help': "Model for abs mag prior and function description. 0: uniform distribution. 
Others to be implemented.", - 'unit': '', - 'Notation': '', - }) - AppModelID: int = field( - default=0, - metadata={'help': "Model for converting absolute to apparent magnitudes. 0: no k-correction. Others to be implemented.", - 'unit': '', - 'Notation': '', - }) - AbsModelID: int = field( - default=0, - metadata={'help': "Model for describing absolute magnitudes. 0: Simple histogram of absolute magnitudes. 1: spline interpolation of histogram.", - 'unit': '', - 'Notation': '', - }) + +class OpticalState(data_class.myData): + """Initialize the full optical state dataset + with the default parameters + + """ + + def __init__(self): + self.set_dataclasses() + self.set_params() + + def set_dataclasses(self): + self.simple = SimpleParams() + self.loudas = LoudasParams() + self.app = Apparent() + self.id = Identification() + + + def update_param(self, param:str, value): + # print(self.params) + DC = self.params[param] + setattr(self[DC], param, value) + diff --git a/zdm/parameters.py b/zdm/parameters.py index 289e40cd..37f162c1 100644 --- a/zdm/parameters.py +++ b/zdm/parameters.py @@ -165,7 +165,7 @@ class FRBDemoParams(data_class.myDataClass): ) lC: float = field( default=3.3249, - metadata={"help": "log10 constant in number per Gpc^-3 day^-1 at z=0"}, + metadata={"help": "log10 constant in number per Mpc^-3 day^-1 at z=0"}, ) @@ -193,8 +193,8 @@ class RepeatParams(data_class.myDataClass): }) RC: float = field( default = 1e-2, - metadata={'help': 'Constant repeater density', - 'unit': 'Repeaters day / Gpc^-3', + metadata={'help': 'Constant repeater density. Gets calculated by code.', + 'unit': 'Repeaters / Mpc^-3', 'Notation': '$C_R$', }) RE0: float = field( @@ -504,6 +504,7 @@ def set_dataclasses(self): self.IGM = IGMParams() self.energy = EnergeticsParams() self.rep = RepeatParams() + self.photo=PhotometricParams() def update_param(self, param: str, value): """Update a single parameter by name. 
@@ -542,3 +543,15 @@ def set_astropy_cosmo(self, cosmo): self.cosmo.Omega_b = cosmo.Ob0 self.cosmo.Omega_b_h2 = cosmo.Ob0 * (cosmo.H0.value / 100.0) ** 2 return + +################################################################################ +@dataclass +class PhotometricParams(data_class.myDataClass): + + smearing:bool =field(default=False) + + sigma:float =field(default=0.035) + + sigma_width:int =field(default=6) + + diff --git a/zdm/scripts/Path/estimate_path_priors.py b/zdm/scripts/Path/estimate_path_priors.py index 133742ac..f4c81bf6 100644 --- a/zdm/scripts/Path/estimate_path_priors.py +++ b/zdm/scripts/Path/estimate_path_priors.py @@ -1,10 +1,39 @@ """ -Script showing how to use zDM as priors for CRAFT -host galaxy magnitudes. +Estimate zdm-informed PATH priors for CRAFT/ICS FRB host galaxies. -It requirses the FRB and astropath modules to be installed. +This script demonstrates how to incorporate zdm-derived p(z|DM) predictions +as priors for the PATH (Probabilistic Association of Transients to their Hosts) +algorithm applied to CRAFT ICS FRBs. -This does NOT include optimisation of any parameters +For each FRB in the CRAFT ICS sample (`opt.frblist`), the script runs PATH +twice and compares results: + +1. **Baseline run**: PATH with a flat (uninformative) prior on host galaxy + apparent magnitude, and a fixed prior P_U=0.1 on the host being below + the detection threshold. + +2. **zdm-informed run**: PATH using a physically motivated prior on host + apparent magnitude derived from the Marnoch+2023 host galaxy luminosity + model combined with the zdm p(z|DM_EG) probability distribution. The + probability P_U that the true host is undetected is also estimated from + the model rather than set by hand. + +The output is a weighted histogram of posterior host galaxy apparent +magnitudes (P_Ox) across all FRBs, saved to ``posterior_pOx.png``. + +Note: This script does NOT optimise any zdm or host galaxy model parameters. 
+It uses the CRAFT_ICS_1300 survey grid with default zdm parameter values. + +Requirements +------------ +- ``astropath`` package (PATH implementation) +- ``frb`` package (FRB utilities and optical data) +- PATH-compatible optical data for each FRB in ``opt.frblist`` + +References +---------- +- Marnoch et al. 2023, MNRAS 525, 994 (host galaxy luminosity model) +- Macquart et al. 2020 (Macquart relation / p(DM|z)) """ #standard Python imports @@ -13,20 +42,50 @@ # imports from the "FRB" series from zdm import optical as opt +from zdm import optical_numerics as on from zdm import loading from zdm import cosmology as cos from zdm import parameters from zdm import loading +from zdm import frb_lists as lists import astropath.priors as pathpriors def calc_path_priors(): """ - Loops over all ICS FRBs + Run PATH on all CRAFT ICS FRBs with and without zdm-derived priors. + + Initialises a zdm grid for the CRAFT_ICS_1300 survey and the Marnoch+2023 + host galaxy luminosity model. For each FRB in ``frblist.icslist``: + + - Matches the FRB to the CRAFT_ICS_1300 survey to retrieve its + extragalactic dispersion measure (DM_EG). + - Runs PATH with a flat apparent-magnitude prior and fixed P_U=0.1 + (``usemodel=False``), giving baseline posteriors P_Ox1. + - Uses the zdm model to compute a physically motivated prior on apparent + host magnitude, p(m_r | DM_EG), via ``wrapper.init_path_raw_prior_Oi``, + and estimates P_U from the fraction of the magnitude prior that falls + below the survey detection limit via ``wrapper.estimate_unseen_prior``. + - Runs PATH again with the zdm-derived prior (``usemodel=True``) to give + updated posteriors P_Ox2. + + After processing all FRBs, produces a weighted histogram of the posterior + host apparent magnitudes (P_Ox2) across the whole sample and saves it to + ``posterior_pOx.png``. + + Notes + ----- + FRBs not found in the CRAFT_ICS_1300 survey (e.g. 
because they were + detected by a different instrument configuration) are skipped with a + warning. + + The zdm model parameters are held fixed at their default values; no + parameter optimisation is performed here. See + ``optimise_host_priors.py`` for the equivalent script with optimisation. """ - frblist = opt.frblist + frblist = lists.icslist NFRB = len(frblist) @@ -34,21 +93,18 @@ def calc_path_priors(): state = parameters.State() cos.set_cosmology(state) cos.init_dist_measures() - model = opt.host_model() + model = opt.marnoch_model() name='CRAFT_ICS_1300' ss,gs = loading.surveys_and_grids(survey_names=[name]) g = gs[0] s = ss[0] # must be done once for any fixed zvals - model.init_zmapping(g.zvals) + wrapper = opt.model_wrapper(model,g.zvals) # do this only for a particular FRB # it gives a prior on apparent magnitude and pz #AppMagPriors,pz = model.get_posterior(g,DMlist) - # do this once per "model" objects - pathpriors.USR_raw_prior_Oi = model.path_raw_prior_Oi - allmags = None allPOx = None @@ -68,12 +124,16 @@ def calc_path_priors(): DMEG = s.DMEGs[imatch] + # + # original calculation - P_O1,P_Ox1,P_Ux1,mags1 = opt.run_path(frb,model,usemodel=False,PU=0.1) + P_O1,P_Ox1,P_Ux1,mags1,ptbl = on.run_path(frb,usemodel=False,P_U=0.1) + + # initialises wrapper to give p(mr|DMEG) for p(z|DM) grid predictions + wrapper.init_path_raw_prior_Oi(DMEG,g) + PU = wrapper.estimate_unseen_prior() - model.init_path_raw_prior_Oi(DMEG,g) - PU = model.estimate_unseen_prior(mag_limit=26) # might not be correct - P_O2,P_Ox2,P_Ux2,mags2 = opt.run_path(frb,model,usemodel=True,PU = PU) + P_O2,P_Ox2,P_Ux2,mags2,ptbl = on.run_path(frb,usemodel=True,P_U = PU) if False: # compares outcomes @@ -95,8 +155,8 @@ def calc_path_priors(): allmags = np.append(allmags,mags2) allPOx = np.append(allPOx,P_Ox2) - Nbins = int(model.Appmax - model.Appmin)+1 - bins = np.linspace(model.Appmin,model.Appmax,Nbins) + Nbins = int(wrapper.Appmax - wrapper.Appmin)+1 + bins = 
np.linspace(wrapper.Appmin,wrapper.Appmax,Nbins) plt.figure() plt.hist(allmags,weights = allPOx, bins = bins,label="Posterior") plt.legend() diff --git a/zdm/scripts/Path/optimise_host_priors.py b/zdm/scripts/Path/optimise_host_priors.py index 279f9728..bc393f3b 100644 --- a/zdm/scripts/Path/optimise_host_priors.py +++ b/zdm/scripts/Path/optimise_host_priors.py @@ -1,25 +1,58 @@ """ -This file illustrates how to optimise the host prior -distribution by fitting to CRAFT ICS optical observations. -It fits a model of absolute galaxy magnitude distributions, -uses zDM to predict redshifts and hence apparent magntidues, -runs PATH using that prior, and tries to get priors to match posteriors. +Optimise FRB host galaxy magnitude priors using zdm predictions and PATH. -WARNING: this is NOT the optimal method! That would require using -a catalogue of galaxies to sample from to generate fake opotical fields. -But nonetheless, this tests the power of estimating FRB host galaxy -contributions using zDM to set priors for apparent magnitudes. +This script fits a parametric model of FRB host galaxy absolute magnitude +distributions to the CRAFT ICS optical observations. It works by: -WARNING2: To do this properly also requires inputting the posterior POx -for host galaxies into zDM! This simulation does not do that either. +1. Initialising zdm grids for the three CRAFT ICS survey bands (892, 1300, + and 1632 MHz) using the HoffmannHalo25 parameter state. +2. Constructing a host galaxy model (``simple`` or ``loudas``) that predicts + apparent r-band magnitudes by convolving the absolute magnitude distribution + with the zdm p(z|DM_EG) redshift prior, optionally including a k-correction. +3. Running PATH with those zdm-derived apparent magnitude priors to obtain + posterior host association probabilities P_Ox for each CRAFT ICS FRB. +4. 
Optimising the model parameters with ``scipy.optimize.minimize`` by + minimising either a maximum-likelihood statistic or a KS-like goodness-of-fit + statistic against the observed PATH posteriors. -WARNING3: this program takes O~1 hr to run +After optimisation the script: + +- Saves the best-fit parameters to ``_output/best_fit_params.npy``. +- Plots the predicted vs observed apparent magnitude distributions for the + best-fit model (``best_fit_apparent_magnitudes.png``). +- Re-runs PATH with the original (flat) priors for comparison and produces a + scatter plot of best-fit vs original posteriors + (``Scatter_plot_comparison.png``). + +Limitations +----------- +- The optimal approach would sample galaxy candidates from a real photometric + catalogue to construct proper optical fields; this script uses a parametric + model instead. +- Host identification posteriors (P_Ox) are not fed back into the zdm + likelihood; a self-consistent joint fit is not performed. +- Runtime can be significant when optimising the ``simple`` model (10 free + parameters by default). + +Usage +----- +Set ``minimise = True`` (default) to run the optimiser, or ``False`` to load +previously saved parameters from ``_output/best_fit_params.npy``. +Switch between host models by changing ``modelname`` to ``"simple"`` or +``"loudas"``. 
+ +Requirements +------------ +- ``astropath`` package (PATH implementation) +- ``frb`` package (FRB utilities and optical data) """ #standard Python imports +import os import numpy as np from matplotlib import pyplot as plt +from scipy.optimize import minimize # imports from the "FRB" series from zdm import optical as opt @@ -28,72 +61,165 @@ from zdm import cosmology as cos from zdm import parameters from zdm import loading +from zdm import optical_numerics as on +from zdm import states +from zdm import frb_lists as lists +# other FRB library imports import astropath.priors as pathpriors -from scipy.optimize import minimize + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) def main(): """ - Main function - Contains outer loop to iterate over parameters - + Optimise host galaxy magnitude model parameters and compare with baseline PATH. + + Workflow: + + 1. Load the CRAFT ICS FRB list and initialise zdm grids for the 892, 1300, + and 1632 MHz survey bands using the HoffmannHalo25 cosmological/FRB state. + 2. Select a host magnitude model (``"simple"`` or ``"loudas"``) and configure + its parameter bounds and initial values. + 3. If ``minimise=True``, call ``scipy.optimize.minimize`` with + ``on.function`` as the objective, minimising either the maximum-likelihood + statistic (``istat=1``) or the KS-like statistic (``istat=0``) over all + CRAFT ICS FRBs. Best-fit parameters are saved to + ``_output/best_fit_params.npy``. + 4. Re-evaluate PATH at the best-fit parameters and compute both the + likelihood and KS statistics; save the apparent magnitude comparison + plot to ``_output/best_fit_apparent_magnitudes.png``. + 5. Re-run PATH with the original flat priors (``usemodel=False``) and save + a scatter plot comparing original vs best-fit P_Ox posteriors to + ``_output/Scatter_plot_comparison.png``. 
+ + Configuration knobs (edit at the top of the function body): + + - ``istat``: 0 = KS statistic, 1 = maximum-likelihood statistic. + - ``dok``: whether to include a k-correction in the apparent magnitude model. + - ``modelname``: ``"simple"`` for the parametric histogram model or + ``"loudas"`` for the Loudas single-parameter model. + - ``POxcut``: optional float (e.g. 0.9) to exclude low-confidence FRBs + from the model comparison. + - ``minimise``: set to ``False`` to skip optimisation and load saved + parameters instead. """ ######### List of all ICS FRBs for which we can run PATH ####### - # hard-coded list of FRBs with PATH data in ice paper - frblist=opt.frblist + # hard-coded list of FRBs with PATH data in ICE paper + frblist = lists.icslist # Initlisation of zDM grid # Eventually, this should be part of the loop, i.e. host IDs should # be re-fed into FRB surveys. However, it will be difficult to do this # with very limited redshift estimates. That might require posterior # estimates of redshift given the observed galaxies. Maybe. - state = parameters.State() + state = states.load_state("HoffmannHalo25",scat=None,rep=None) + cos.set_cosmology(state) cos.init_dist_measures() + + # loads zDM grids names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] - ss,gs = loading.surveys_and_grids(survey_names=names) + ss,gs = loading.surveys_and_grids(survey_names=names,init_state=state) + + ######## Determnine which statistic to use in optimisation ######## + # setting istat=0 means using a ks statistic to fit p(m_r) + # setting istat=1 means using a maximum likelihood estimator + istat=1 + dok = True # turn on k-correction or not + + # determines which model to use + #modelname = "loudas" + modelname = "simple" + + opdir = modelname+"_output/" + POxcut = None # set to e.g. 
0.9 to reject FRBs with lower posteriors when doing model comparisons + + if not os.path.exists(opdir): + os.mkdir(opdir) + + # Case of simple host model # Initialisation of model - opt_params = op.Hosts() - opt_params.AbsModelID = 1 - model = opt.host_model(opstate = opt_params) - model.init_zmapping(gs[0].zvals) + # simple host model + if modelname=="simple": + opstate = op.OpticalState() + + if dok: + Nparams = opstate.simple.NModelBins+1 + opstate.simple.AppModelID = 1 # sets to include k-correction + opstate.simple.k = 0.5 # for some reason, this just doesn't make much difference to results + bounds = [(-25,25)]+[(0,1)]*(Nparams-1) + else: + Nparams = opstate.simple.NModelBins + # bins now give log-space values, hence -5,2 is range of 10^7 + if opstate.simple.AbsModelID == 3: + base=(-5,2) # log space + else: + base=(0,1) # linear space + bounds = [base]*(Nparams) + opstate.simple.AppModelID = 0 # no k-correction + + model = opt.simple_host_model(opstate) + x0 = model.get_args() + + + elif modelname=="loudas": + #### case of Loudas model + model = opt.loudas_model() + x0 = [0.5] + bounds=[(-3,3)] # large range + else: + print("Unrecognised host model ", modelname) - x0 = model.AbsPrior - args=[frblist,ss,gs,model] - Nparams = len(x0) - bounds = [(0,1)]*Nparams - result = minimize(function,x0 = x0,args=args,bounds = bounds) + # initialise aguments to minimisation function + args=[frblist,ss,gs,model,POxcut,istat] - # Recording the current spline best-fit here - #x = [0.00000000e+00 0.00000000e+00 7.05155614e-02 8.39235326e-01 - # 3.27794398e-01 1.00182186e-03 0.00000000e+00 3.46702511e-04 - # 2.17040011e-03 9.72472750e-04] + # "function" is the function that performs the comparison of + # predictions to outcomes. 
It's where all the magic happens - # recording the current non-spline best fit here - #x = [ 1.707e-04, 8.649e-02, 9.365e-01, 9.996e-01, 2.255e-01,\ - # 3.493e-02, 0.000e+00, 0.000e+00, 0.000e+00, 1.000e-01] - #x = np.array(x) + minimise=True + if minimise: + result = minimize(on.function,x0 = x0,args=args,bounds = bounds) + print("Best fit result is ",result.x) + x = result.x + # saves result + np.save(opdir+"/best_fit_params.npy",x) + else: + x = np.load(opdir+"/best_fit_params.npy") - print("Best fit result is ",result.x) - x = result.x + # initialises arguments + model.init_args(x) + + outfile = opdir+"best_fit_apparent_magnitudes.png" + wrappers = on.make_wrappers(model,gs) + NFRB,AppMags,AppMagPriors,ObsMags,ObsPriors,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False) + + # calculates a maximum-likelihood statistic + stat = on.calculate_likelihood_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=outfile) + + # calculates a KS-like statistic + stat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) - # analyses final result - x /= np.sum(x) - model.AbsPrior = x - model.reinit_model() - outfile = "best_fit_apparent_magnitudes.png" - NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs = calc_path_priors(frblist,ss,gs,model,verbose=False) - stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) # calculates the original PATH result - #outfile = "original_fit_apparent_magnitudes.png" - NFRB2,AppMags2,AppMagPriors2,ObsMags2,ObsPosteriors2,PUprior2,PUobs2,sumPUprior2,sumPUobs2 = calc_path_priors(frblist,ss,gs,model,verbose=False,usemodel=False) - #stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=outfile) + outfile = 
opdir+"original_fit_apparent_magnitudes.png" + NFRB2,AppMags2,AppMagPriors2,ObsMags2,ObsPriors2,ObsPosteriors2,PUprior2,PUobs2,sumPUprior2,sumPUobs2,frbs,dms = on.calc_path_priors(frblist,ss,gs,wrappers,verbose=False,usemodel=False) + # currently, calculating KS statistics does not work/make sense for original path. need to re-think this + #stat = on.calculate_ks_statistic(NFRB,AppMags,AppMagPriors2,ObsMags2,ObsPosteriors2,sumPUobs2,sumPUprior2,plotfile=outfile) + # flattens lists of lists + ObsPosteriors = [x for xs in ObsPosteriors for x in xs] + ObsPosteriors2 = [x for xs in ObsPosteriors2 for x in xs] # plots original vs updated posteriors plt.figure() @@ -105,64 +231,35 @@ def main(): plt.scatter(PUobs2,PUobs,label="Unobserved",marker='+') plt.legend() plt.tight_layout() - plt.savefig("Scatter_plot_comparison.png") + plt.savefig(opdir+"Scatter_plot_comparison.png") plt.close() - - - # plots final result on absolute magnitudes - plt.figure() - plt.xlabel("Absolute magnitude, $M_r$") - plt.ylabel("$p(M_r)$") - plt.plot(model.AbsMags,model.AbsMagWeights/np.max(model.AbsMagWeights),label="interpolation") - plt.plot(model.ModelBins,x/np.max(x),marker="o",linestyle="",label="Model Parameters") - plt.legend() - plt.tight_layout() - plt.savefig("best_fit_absolute_magnitudes.pdf") - plt.close() -def function(x,args): +def make_cdf_for_plotting(xvals, weights=None): """ - function to be minimised - """ - frblist = args[0] - ss = args[1] - gs=args[2] - model=args[3] - - # initialises model to the priors - # technically, there is a redundant normalisation here - model.AbsPrior = x - - NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUprior,PUobs,sumPUprior,sumPUobs = calc_path_priors(frblist,ss,gs,model,verbose=False) - - # we re-normalise the sum of PUs by NFRB - - # prevents infinite plots being created - stat = calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,sumPUobs,sumPUprior,plotfile=None) - - return stat - - -def make_cdf(xs,ys,ws,norm 
= True): - """ - makes a cumulative distribution in terms of - the x-values x, observed values y, and weights w - - """ - cdf = np.zeros([xs.size]) - for i,y in enumerate(ys): - OK = np.where(xs > y)[0] - cdf[OK] += ws[i] - if norm: - cdf /= cdf[-1] - return cdf + Build a step-function CDF suitable for plotting. -def make_cdf_for_plotting(xvals,weights=None): - """ - Creates a cumulative distribution function - - xvals,yvals: values of data points + Converts an array of data values (and optional weights) into paired + (x, y) arrays that trace the cumulative distribution as a staircase, + with two points per input value so that horizontal steps are rendered + correctly by matplotlib. + + Parameters + ---------- + xvals : np.ndarray + 1-D array of data values. Will be sorted in ascending order. + weights : np.ndarray, optional + 1-D array of weights with the same length as ``xvals``. If provided, + the CDF is computed as the normalised cumulative sum of the sorted + weights. If ``None``, a uniform CDF over ``N`` points is used, + with steps at ``0, 1/N, 2/N, ..., 1``. + + Returns + ------- + cx : np.ndarray + x-coordinates of the staircase CDF (length ``2 * N``). + cy : np.ndarray + y-coordinates of the staircase CDF (length ``2 * N``). """ N = xvals.size cx = np.zeros([2*N]) @@ -184,151 +281,8 @@ def make_cdf_for_plotting(xvals,weights=None): cy[2*i+1] = weights[i+1] return cx,cy -def calculate_goodness_statistic(NFRB,AppMags,AppMagPriors,ObsMags,ObsPosteriors,PUobs,PUprior,plotfile=None): - """ - Calculates a ks-like statistics to be proxzy for goodness-of-fit - We must set each AppMagPriors to 1.-PUprior at the limiting magnitude for each observation, - and sum the ObsPosteriors to be equal to 1.-PUobs at that magnitude. - Then these are what gets summed. - - This can be readily done by combining all ObsMags and ObsPosteriors into a single long list, - since this should already be correctly normalised. Priors require their own weight. 
- - Inputs: - AppMags: array listing apparent magnitudes - AppMagPrior: array giving prior on AppMags - ObsMags: list of observed magnitudes - ObsPosteriors: list of posterior values corresponding to ObsMags - PUobs: posterior on unseen probability - PUprior: prior on PU - Plotfile: set to name of output file for comparison plot - - Returns: - k-like statistic of biggest obs/prior difference - """ - - # we calculate a probability using a cumulative distribution - prior_dist = np.cumsum(AppMagPriors) - - # the above is normalised to NFRB. We now divide it by this - # might want to be careful here, and preserve this normalisation - prior_dist /= NFRB #((NFRB-PUprior)/NFRB) / prior_dist[-1] - - - obs_dist = make_cdf(AppMags,ObsMags,ObsPosteriors,norm=False) - - obs_dist /= NFRB - - # we calculate something like the k-statistic. Includes NFRB normalisation - diff = obs_dist - prior_dist - stat = np.max(np.abs(diff)) - - if plotfile is not None: - plt.figure() - plt.xlabel("Apparent magnitude $m_r$") - plt.ylabel("Cumulative host galaxy distribution") - #cx,cy = make_cdf_for_plotting(ObsMags,weights=ObsPosteriors) - plt.plot(AppMags,obs_dist,label="Observed") - plt.plot(AppMags,prior_dist,label="Prior") - plt.legend() - plt.tight_layout() - plt.savefig(plotfile) - plt.close() - - - return stat - -def calc_path_priors(frblist,ss,gs,model,verbose=True,usemodel=True): - """ - Inner loop. Gets passed model parameters, but assumes everything is - initialsied from there. 
- - Inputs: - FRBLIST: list of FRBs to retrieve data for - ss: list of surveys modelling those FRBs (searches for FRB in data) - gs: list of zDM grids modelling those surveys - model: host_model class object used to calculate priors on magnitude - verbose (bool): guess - - Returns: - Number of FRBs fitted - AppMags: list of apparent magnitudes used internally in the model - allMagPriors: summed array of magnitude priors calculated by the model - allObsMags: list of observed magnitudes of candidate hosts - allPOx: list of posterior probabilities calculated by the model - allPU: summed values of unobserved prior - allPUx: summed values of posterior of being unobserved - """ - - NFRB = len(frblist) - - # we assume here that the model has just had a bunch of parametrs updated - # within it. Must be done once for any fixed zvals. If zvals change, - # then we have another issue - model.reinit_model() - - # do this once per "model" objects - pathpriors.USR_raw_prior_Oi = model.path_raw_prior_Oi - - allObsMags = None - allPOx = None - allpriors = None - AppMags = model.AppMags - sumPU = 0. - sumPUx = 0. - allPU = [] - allPUx = [] - nfitted = 0 + - for i,frb in enumerate(frblist): - # interates over the FRBs. "Do FRB" - # P_O is the prior for each galaxy - # P_Ox is the posterior - # P_Ux is the posterior for it being unobserved - # mags is the list of galaxy magnitudes - - # determines if this FRB was seen by the survey, and - # if so, what its DMEG is - for j,s in enumerate(ss): - imatch = opt.matchFRB(frb,s) - if imatch is not None: - # this is the survey to be used - g=gs[j] - break - - if imatch is None: - if verbose: - print("Could not find ",frb," in any survey") - continue - - nfitted += 1 - - DMEG = s.DMEGs[imatch] - # this is where the particular survey comes into it - MagPriors = model.init_path_raw_prior_Oi(DMEG,g) - mag_limit=26 # might not be correct - PU = model.estimate_unseen_prior(mag_limit) - bad = np.where(AppMags > mag_limit)[0] - MagPriors[bad] = 0. 
- - P_O,P_Ox,P_Ux,ObsMags = opt.run_path(frb,model,usemodel=usemodel,PU = PU) - - ObsMags = np.array(ObsMags) - - if allObsMags is None: - allObsMags = ObsMags - allPOx = P_Ox - allMagPriors = MagPriors - else: - allObsMags = np.append(allObsMags,ObsMags) - allPOx = np.append(allPOx,P_Ox) - allMagPriors += MagPriors - - sumPU += PU - sumPUx += P_Ux - allPU.append(PU) - allPUx.append(P_Ux) - return nfitted,AppMags,allMagPriors,allObsMags,allPOx,allPU,allPUx,sumPU,sumPUx main() diff --git a/zdm/scripts/Path/plot_host_models.py b/zdm/scripts/Path/plot_host_models.py new file mode 100644 index 00000000..eab95db5 --- /dev/null +++ b/zdm/scripts/Path/plot_host_models.py @@ -0,0 +1,446 @@ +""" +Plot and compare FRB host galaxy magnitude models against CRAFT ICS PATH posteriors. + +This script demonstrates how to load, configure, and visualise the three +available FRB host galaxy magnitude models, then evaluate PATH host association +posteriors for all CRAFT ICS FRBs using each model in turn. + +Model comparison +---------------- +Three host magnitude models are loaded and plotted: + +- **Simple model** (``opt.simple_host_model``): a parametric histogram of + absolute magnitudes M_r, linearly interpolated, with an optional + k-correction. Parameters are set by hand here (not from a fitted result). +- **Loudas model** (``opt.loudas_model``): predicts apparent magnitudes from + a galaxy luminosity function weighted by stellar mass (``fSFR=0``) or + star-formation rate (``fSFR=1``), based on Nick Loudas's galaxy model. + The mixing parameter ``fSFR`` is varied to show sensitivity. +- **Marnoch model** (``opt.marnoch_model``): predicts apparent magnitudes + from the galaxy spectral evolution model of Marnoch et al. 2023 + (MNRAS 525, 994). + +Diagnostic plots produced in ``Plots/`` +----------------------------------------- +- ``simple_model_mags.png``: absolute magnitude prior p(M_r) for the simple + model, showing the interpolated curve and the raw histogram bin values. 
+- ``loudas_model_mags.png``: apparent magnitude distributions p(m_r) for the + Loudas model at several redshifts, comparing mass- vs SFR-weighted variants. +- ``loudas_fsfr_interpolation.png``: sensitivity of the Loudas model to the + ``fSFR`` mixing parameter at z=0.5, illustrating the full allowed range. +- ``all_model_apparent_mags.png``: side-by-side comparison of p(m_r | z) for + all three models at z = 0.1, 0.5, and 2.0. +- ``all_model_mag_priors_dm.png``: PATH apparent magnitude priors p(m_r | DM) + for all three models at DM = 200, 600, and 1000 pc/cm³, using the + CRAFT_ICS_1300 zdm grid to convert DM to a redshift prior. +- ``posterior_comparison.png``: scatter plot of PATH host posteriors P(O|x) + from the original flat-prior run vs each of the four zdm-informed model + runs, across all CRAFT ICS FRBs. + +Note +---- +Parameter values used here are illustrative initial estimates, not best-fit +results from the published analysis. See ``optimise_host_priors.py`` for the +fitting procedure. + +Requirements +------------ +- ``astropath`` package (PATH implementation) +- ``frb`` package (FRB utilities and optical data) +""" + +#standard Python imports +import os +import numpy as np +from matplotlib import pyplot as plt + +# imports from the "FRB" series +from zdm import optical as opt +from zdm import optical_params as op +from zdm import optical_numerics as on +from zdm import loading +from zdm import cosmology as cos +from zdm import parameters +from zdm import loading +from zdm import frb_lists as lists + +import astropath.priors as pathpriors + + +import matplotlib + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + + +def calc_path_priors(): + """ + Generate diagnostic plots for all host models and compare PATH posteriors. + + Workflow: + + 1. 
**Model initialisation**: Loads the simple, Loudas (mass-weighted, + ``fSFR=0``), and Marnoch host magnitude models with illustrative + parameter values. + + 2. **Intrinsic magnitude plots**: Plots the absolute magnitude prior + p(M_r) for the simple model and apparent magnitude distributions + p(m_r) for the Loudas model at several redshifts. + + 3. **fSFR sensitivity**: Plots p(m_r | z=0.5) for the Loudas model + across a wide range of ``fSFR`` values to illustrate model behaviour + beyond the physically motivated [0, 1] range. + + 4. **Model comparison at fixed z**: Compares p(m_r | z) across all three + models at z = 0.1, 0.5, and 2.0. + + 5. **DM-dependent priors**: Loads the CRAFT_ICS_1300 zdm grid and wraps + each model in a ``model_wrapper`` to compute the PATH apparent magnitude + prior p(m_r | DM) at DM = 200, 600, and 1000 pc/cm³. + + 6. **PATH evaluation over CRAFT ICS FRBs**: For each FRB in + ``data/optical/frb_lists.icslist`` that is found in the CRAFT_ICS_1300 survey: + + - Runs PATH with a flat prior (``usemodel=False``, P_U=0.1) as the + baseline. + - Runs PATH four more times, one per zdm-informed model variant + (simple; Loudas mass-weighted; Loudas SFR-weighted; Marnoch), each + with P_U estimated from ``wrapper.estimate_unseen_prior()``. + - Prints diagnostic output for candidate host galaxies that flip above + P_Ox=0.5 relative to the baseline (simple model only). + + 7. **Posterior scatter plot**: Produces ``Plots/posterior_comparison.png`` + showing P(O|x) from each zdm-informed model against the flat-prior + baseline across all FRBs. + + Output files are written to the ``Plots/`` subdirectory, which is created + if it does not already exist. + """ + + opdir = "Plots/" + if not os.path.exists(opdir): + os.mkdir(opdir) + ##### performs the following calculations for the below combinations ###### + + # loads a default optical state. 
+ opstate1 = op.OpticalState() + #opstate1.SimpleParams.AbsPrior = [0,0,0.1,1,0.4,0,0,0,0,0] # this is from an initial estimate + + opstate2 = op.OpticalState() + opstate2.loudas.fSFR=0. + + + ######## initialises optical-independent info ######## + #frblist is a hard-coded list of FRBs for which we have optical PATH data + frblist = lists.icslist + NFRB = len(frblist) + + + state = parameters.State() + cos.set_cosmology(state) + cos.init_dist_measures() + + + plt.figure() + + opstate = op.SimpleParams() + + ##### makes a plot of host priors for the simple model ###### + + # simple host model + model1 = opt.simple_host_model(opstate1) + # this is from an initial estimate. Currently, no way to enter this into the opstate. To do. + absprior = [0,0,0.1,1,0.4,0,0,0,0,0] + model1.init_args(absprior) + + plt.figure() + plt.plot(model1.AbsMags,model1.AbsMagWeights/np.max(model1.AbsMagWeights),label="Histogram interpolation") + plt.scatter(model1.ModelBins,model1.AbsPrior/np.max(model1.AbsPrior),label="Simple model points") + plt.xlabel("Absolute magnitude $M_r$") + plt.ylabel("$p(M_r)$") + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"simple_model_mags.png") + plt.close() + + ####### Makes plots for Nick Loudas' model ##### + + model2=opt.loudas_model(opstate2) + for i in np.arange(1,20,4): + plt.plot(model2.rmags,model2.p_mr_mass[i],label="$M_\\odot$, z="+str(model2.zbins[i]),linestyle="-") + plt.plot(model2.rmags,model2.p_mr_sfr[i],label="SFR, z="+str(model2.zbins[i]),linestyle="--",color=plt.gca().lines[-1].get_color()) + plt.xlabel("apparent magnitude $m_r$") + plt.ylabel("$p(m_r)$") + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"loudas_model_mags.png") + plt.close() + + + ###### Gives examples for fsfr at weird values ##### + + plt.figure() + plt.xlabel("$m_r$") + plt.ylabel("$P(m_r | z=0.5)$") + styles=["-","--",":","-.","-","--",":","-."] + fsfrs = np.linspace(-2,3,6) # extrapolates to weird values + z=0.5 + mrbins = np.linspace(0,40,401) + rbc = 
(mrbins[1:] + mrbins[:-1])/2. + for i,fsfr in enumerate(fsfrs): + model2.init_args(fsfr) + pmr = model2.get_pmr_gz(mrbins,z) + pmr /= np.sum(pmr)*(rbc[1]-rbc[0]) + plt.plot(rbc,pmr,label="$f_{\\rm sfr} = $"+str(fsfr)[:5],linestyle=styles[i]) + plt.xlim(15,30) + plt.ylim(0.,0.6) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"loudas_fsfr_interpolation.png") + plt.close() + + # set up basic histogram of p(mr) distribution + mrbins = np.linspace(0,40,401) + mrvals=(mrbins[:-1]+mrbins[1:])/2. + dmr = mrbins[1]-mrbins[0] + + model3 = opt.marnoch_model() + + ######### Plots apparent mag distribution for all models as function of z ####### + styles=["-","--",":","-."] + + plt.figure() + flist=[0,1] + + for i,z in enumerate([0.1,0.5,2.0]): + + # simple model + pmr = model1.get_pmr_gz(mrbins,z) + pmr /= np.sum(pmr) + + plt.plot(mrvals,pmr/dmr,label="z = "+str(z)+"; Naive",linestyle=styles[0]) + + # Loudas model dependencies + for i,fsfr in enumerate(flist): + model2.init_args(fsfr) + pmr = model2.get_pmr_gz(mrbins,z) + pmr /= np.sum(pmr) + plt.plot(mrvals,pmr/dmr,label = " $f_{\\rm sfr}$ = "+str(fsfr), + linestyle=styles[i+1],color=plt.gca().lines[-1].get_color()) + + pmr = model3.get_pmr_gz(mrbins,z) + plt.plot(mrvals,pmr/dmr,label = " Marnoch",linestyle=styles[3], + color=plt.gca().lines[-1].get_color()) + + plt.xlabel("Apparent magnitude $m_r$") + plt.ylabel("$p(m_r|z)$") + plt.xlim(10,40) + plt.ylim(0,0.35) + plt.tight_layout() + plt.legend() + plt.savefig(opdir+"all_model_apparent_mags.png") + plt.close() + + + ############################################################################ + #Load a grid. We'll only load data from the ICS 1300 survey. Just to use it + # as an example calculation for particular DMs + ############################################################################ + name='CRAFT_ICS_1300' + ss,gs = loading.surveys_and_grids(survey_names=[name]) + g = gs[0] + s = ss[0] + + + # wrapper around the optical model. 
 For returning p(m_r|DM)
+    wrapper1 = opt.model_wrapper(model1,g.zvals) # simple
+    wrapper2 = opt.model_wrapper(model2,g.zvals) # loudas with fsfr=0
+    wrapper3 = opt.model_wrapper(model3,g.zvals) # marnoch
+
+    # simply illustrates how one might change the probabilities of
+    # observing a galaxy of a given magnitude
+    wrapper1.pU_mean = 26.385
+    wrapper1.pU_width = 0.279
+
+
+    # do this once per "model" object
+    #pathpriors.USR_raw_prior_Oi = wrapper1.path_raw_prior_Oi
+
+
+    # how do we change a parameter? We need to pass on the low-level model to the wrapper
+    plt.figure()
+
+    for i,DM in enumerate([200,600,1000]):
+
+        wrapper1.init_path_raw_prior_Oi(DM,g)
+        plt.plot(wrapper1.AppMags,wrapper1.priors,label="DM = "+str(DM)+", Simple",linestyle=styles[0])
+
+        # this is how we change the parameters of a state
+        # we first change the underlying state
+        # then we initialise the model
+        # then we re-init the wrapper.
+        fSFR=0.
+        model2.init_args(fSFR)
+        wrapper2.init_zmapping(g.zvals)
+        wrapper2.init_path_raw_prior_Oi(DM,g)
+        plt.plot(wrapper2.AppMags,wrapper2.priors,label="DM = "+str(DM)+", Loudas: $f_{\\rm sfr}$ = 0.0",
+            linestyle=styles[1],color=plt.gca().lines[-1].get_color())
+
+        fSFR=1.0
+        model2.init_args(fSFR)
+        wrapper2.init_zmapping(g.zvals)
+        wrapper2.init_path_raw_prior_Oi(DM,g)
+        plt.plot(wrapper2.AppMags,wrapper2.priors,label="DM = "+str(DM)+", Loudas: $f_{\\rm sfr}$ = 1.0",
+            linestyle=styles[2],color=plt.gca().lines[-1].get_color())
+
+        wrapper3.init_zmapping(g.zvals)
+        wrapper3.init_path_raw_prior_Oi(DM,g)
+        plt.plot(wrapper3.AppMags,wrapper3.priors,label="DM = "+str(DM)+", Marnoch",
+            linestyle=styles[2],color=plt.gca().lines[-1].get_color())
+
+    plt.xlabel("Absolute magnitude $M_r$")
+    plt.ylabel("$p(M_r)$")
+    plt.legend()
+    plt.tight_layout()
+    plt.savefig(opdir+"all_model_mag_priors_dm.png")
+    plt.close()
+
+    # do this only for a particular FRB
+    # it gives a prior on apparent magnitude and pz
+    #AppMagPriors,pz = 
model.get_posterior(g,DMlist) + + + maglist = [None,None,None,None,None] + allPOx = [None,None,None,None,None] + + labels=["Orig","Simple","Mass-weighted","SFR weighted","Marnoch"] + markers=["x","+","s","o","v"] + + for i,frb in enumerate(frblist): + # interates over the FRBs. "Do FRB" + # P_O is the prior for each galaxy + # P_Ox is the posterior + # P_Ux is the posterior for it being unobserved + # mags is the list of galaxy magnitudes + + # determines if this FRB was seen by the survey, and + # if so, what its DMEG is + imatch = opt.matchFRB(frb,s) + if imatch is None: + print("Could not find ",frb," in survey") + continue + else: + print("Found FRB ",frb) + + DMEG = s.DMEGs[imatch] + + # original calculation + P_O1,P_Ox1,P_Ux1,mags1,ptbl = on.run_path(frb,usemodel=False,P_U=0.1) + + # record this info + if maglist[0] is None: + maglist[0] = mags1 + allPOx[0] = P_Ox1 + else: + maglist[0] = np.append(maglist[0],mags1) + allPOx[0] = np.append(allPOx[0],P_Ox1) + + + # simple model + wrapper1.init_path_raw_prior_Oi(DMEG,g) + PU2 = wrapper1.estimate_unseen_prior() # might not be correct + pathpriors.USR_raw_prior_Oi = wrapper1.path_raw_prior_Oi + P_O2,P_Ox2,P_Ux2,mags2,ptbl = on.run_path(frb,usemodel=True,P_U = PU2) + + + for imag,mag in enumerate(mags2): + if P_Ox2[imag] > 0.5 and P_Ox1[imag] < 0.5: + #print(i,frb,mag,P_Ox1[imag],P_Ox2[imag]) + print(frb,P_Ux1,PU2,DMEG) + for k,x in enumerate(P_Ox1): + print(mags1[k],x,P_Ox2[k]) + + # record this info + if maglist[1] is None: + maglist[1] = mags2 + allPOx[1] = P_Ox2 + else: + maglist[1] = np.append(maglist[1],mags2) + allPOx[1] = np.append(allPOx[1],P_Ox2) + + + # loudas fsfr = 0.0 (i.e., mass weighted) + fSFR=0.0 + model2.init_args(fSFR) + wrapper2.init_zmapping(g.zvals) + wrapper2.init_path_raw_prior_Oi(DMEG,g) + PU3 = wrapper2.estimate_unseen_prior() # might not be correct + pathpriors.USR_raw_prior_Oi = wrapper2.path_raw_prior_Oi + P_O3,P_Ox3,P_Ux3,mags3,ptbl = on.run_path(frb,usemodel=True,P_U = PU3) + + # 
record this info + if maglist[2] is None: + maglist[2] = mags3 + allPOx[2] = P_Ox3 + else: + maglist[2] = np.append(maglist[2],mags3) + allPOx[2] = np.append(allPOx[2],P_Ox3) + + + # loudas fsfr = 1.0 + fSFR=1.0 + model2.init_args(fSFR) + wrapper2.init_zmapping(g.zvals) + wrapper2.init_path_raw_prior_Oi(DMEG,g) + PU4 = wrapper2.estimate_unseen_prior() # might not be correct limit + pathpriors.USR_raw_prior_Oi = wrapper2.path_raw_prior_Oi + P_O4,P_Ox4,P_Ux4,mags4,ptbl = on.run_path(frb,usemodel=True,P_U = PU4) + + # record this info + if maglist[3] is None: + maglist[3] = mags4 + allPOx[3] = P_Ox4 + else: + maglist[3] = np.append(maglist[3],mags4) + allPOx[3] = np.append(allPOx[3],P_Ox4) + + # Marnoch model + wrapper3.init_zmapping(g.zvals) + wrapper3.init_path_raw_prior_Oi(DMEG,g) + PU5 = wrapper3.estimate_unseen_prior() # might not be correct limit + pathpriors.USR_raw_prior_Oi = wrapper3.path_raw_prior_Oi + P_O5,P_Ox5,P_Ux5,mags5,ptbl = on.run_path(frb,usemodel=True,P_U = PU5) + + # record this info + if maglist[4] is None: + maglist[4] = mags5 + allPOx[4] = P_Ox5 + else: + maglist[4] = np.append(maglist[4],mags5) + allPOx[4] = np.append(allPOx[4],P_Ox5) + + + # scatter plot of old vs new priors + plt.figure() + plt.xlabel("$P(O|x)$ (original)") + plt.ylabel("$P(O|x)$ (new)") + for j in np.arange(1,5,1): + plt.scatter(allPOx[0],allPOx[j],label=labels[j],marker=markers[j]) + plt.legend() + plt.tight_layout() + plt.savefig(opdir+"posterior_comparison.png") + plt.close() + + + + +if __name__ == "__main__": + + calc_path_priors() + + + diff --git a/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_CRACO.py b/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_CRACO.py index edaf2609..f4d8a67d 100644 --- a/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_CRACO.py +++ b/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_CRACO.py @@ -40,7 +40,7 @@ def main(): # Initialise surveys and grids sdir = resources.files('zdm').joinpath('data/Surveys') 
names=['CRAFT_CRACO_1300','CRAFT_CRACO_900'] - + ss,gs = loading.surveys_and_grids( survey_names=names,repeaters=False,init_state=state,sdir=sdir) # should be equal to actual number of FRBs, but for this purpose it doesn't matter @@ -68,6 +68,9 @@ def main(): zmax=zmax,DMmax=DMmax,Aconts=[0.01,0.1,0.5], FRBDMs=s.frbs['DMEG'].values,FRBZs=s.frbs['Z'].values, DMlines = s.frbs['DMEG'].values[noz]) + + + exit() pz = np.sum(mean_rates,axis=1) diff --git a/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_ICS.py b/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_ICS.py index a61356d8..fc53936e 100644 --- a/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_ICS.py +++ b/zdm/scripts/PlotIndividualExperiments/plot_ASKAP_ICS.py @@ -46,7 +46,7 @@ def main(): # Initialise surveys and grids sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys') - names=['CRAFT_ICS_892','CRAFT_ICS_1300','CRAFT_ICS_1632'] + names=['CRAFT_ICS_892']#,'CRAFT_ICS_1300','CRAFT_ICS_1632'] state = parameters.State() state.set_astropy_cosmo(Planck18) @@ -58,17 +58,21 @@ def main(): # set limits for plots - will be LARGE! DMmax=3000 zmax=3. 
- # gets sum of rates over three sets of observations # weights by constant and TOBS time=0 + for i,g in enumerate(gs): + ################################# + #g.state.photo.smearing=True + #g.survey.survey_data.observing.Z_FRACTION="lsst_24.7" + #g.calc_rates() + ################################ if i==0: mean_rates=g.rates * ss[i].TOBS * 10**g.state.FRBdemo.lC else: mean_rates += g.rates * ss[i].TOBS * 10**g.state.FRBdemo.lC time += ss[i].TOBS - plt.figure() ax1 = plt.gca() @@ -76,9 +80,19 @@ def main(): ax2 = plt.gca() # chooses the first arbitrarily to extract zvals etc from + name = names[0] s=ss[0] g=gs[0] - name = names[0] + ######################## + ''' + samples=gs[1].GenMCSample(100) + FRBZs=np.zeros(len(samples)) + FRBDMs=np.zeros(len(samples)) + for i in range(len(samples)): + FRBZs[i]=samples[i][0] + FRBDMs[i]=samples[i][1] + ''' + ######################## figures.plot_grid(mean_rates,g.zvals,g.dmvals, name=opdir+name+"_zDM.pdf",norm=3,log=True, label='$\\log_{10} p({\\rm DM}_{\\rm IGM} + {\\rm DM}_{\\rm host},z)$ [a.u.]', @@ -134,7 +148,6 @@ def main(): plt.tight_layout() plt.savefig(opdir+name+"_pdm.pdf") plt.close() - diff --git a/zdm/scripts/PlotIndividualExperiments/plot_Meertrap.py b/zdm/scripts/PlotIndividualExperiments/plot_Meertrap.py index 89d68449..79cb0cac 100644 --- a/zdm/scripts/PlotIndividualExperiments/plot_Meertrap.py +++ b/zdm/scripts/PlotIndividualExperiments/plot_Meertrap.py @@ -36,7 +36,7 @@ def main(): # approximate best-fit values from recent analysis # load states from Hoffman et al 2025 state = states.load_state("HoffmannEmin25",scat="updated",rep=None) - opdir="MeerTRAP" + opdir="MeerTRAP/" if not os.path.exists(opdir): os.mkdir(opdir) @@ -63,7 +63,7 @@ def main(): name = names[0] figures.plot_grid(g.rates,g.zvals,g.dmvals, name=opdir+name+"_zDM.pdf",norm=3,log=True, - label='$\\log_{10} p({\\rm DM}_{\\rm IGM} + {\\rm DM}_{\\rm host},z)$ [a.u.]', + label='$\\log_{10} p({\\rm DM}_{\\rm cosmic} + {\\rm DM}_{\\rm host},z)$ 
[a.u.]', project=False,ylabel='${\\rm DM}_{\\rm IGM} + {\\rm DM}_{\\rm host}$', zmax=zmax,DMmax=DMmax,Aconts=[0.01,0.1,0.5]) diff --git a/zdm/scripts/PlotIndividualExperiments/plot_SKA.py b/zdm/scripts/PlotIndividualExperiments/plot_SKA.py index c23c5902..11a23e01 100644 --- a/zdm/scripts/PlotIndividualExperiments/plot_SKA.py +++ b/zdm/scripts/PlotIndividualExperiments/plot_SKA.py @@ -20,7 +20,7 @@ import numpy as np from zdm import survey from matplotlib import pyplot as plt -from pkg_resources import resource_filename +import importlib.resources as resources def main(): @@ -33,7 +33,7 @@ def main(): os.mkdir(opdir) # Initialise surveys and grids - sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys') + sdir = resources.files('zdm').joinpath('data/Surveys') names=['SKA_mid'] state = parameters.State() # approximate best-fit values from recent analysis diff --git a/zdm/states.py b/zdm/states.py index afb04ebc..02a8432d 100644 --- a/zdm/states.py +++ b/zdm/states.py @@ -84,7 +84,6 @@ def load_state(case="HoffmannHalo25",scat=None,rep=None): # update with relevant values state.update_param_dict(vparams) - return state def set_reps(vparams,rep): @@ -243,7 +242,7 @@ def set_fit_params(vparams,case): vparams['FRBdemo']['alpha_method'] = 1 vparams['FRBdemo']['source_evolution'] = 0 vparams['FRBdemo']['sfr_n'] = 2.88 - #vparams['FRBdemo']['lC'] = 3.15 # incorrect, check + vparams['FRBdemo']['lC'] = -6.24 vparams['host']['lmean'] = 2.13 vparams['host']['lsigma'] = 0.46 @@ -377,7 +376,7 @@ def set_updated_scat(vparams): if not 'scat' in vparams: - vparams['width'] = {} + vparams['scat'] = {} vparams['scat'] = {} vparams['scat']['Slogmean'] = -1.3 vparams['scat']['Slogsigma'] = 0.2 diff --git a/zdm/survey.py b/zdm/survey.py index 0379f753..14a707f3 100644 --- a/zdm/survey.py +++ b/zdm/survey.py @@ -130,6 +130,7 @@ def __init__(self, state, survey_name: str, # Load up self.process_survey_file(filename, NFRB, iFRB, min_lat=state.analysis.min_lat, 
dmg_cut=state.analysis.DMG_cut,survey_dict = survey_dict) + # Check if repeaters or not and set relevant parameters # Now done in loading # self.repeaters=False @@ -244,8 +245,8 @@ def init_widths(self,state=None): wlist = np.logspace(np.log10(self.WMin)+dlogw/2.,np.log10(self.WMax)-dlogw/2.,self.NWbins) wbins[0] -= 3 # ensures we capture low values! else: - wbins[0] = np.log10(self.WMin) - wbins[1] = np.log10(self.WMax) + wbins[0] = self.WMin + wbins[1] = self.WMax dlogw = np.log10(wbins[1]/wbins[0]) wlist = np.array([(self.WMax*self.WMin)**0.5]) self.wbins = wbins @@ -927,10 +928,20 @@ def process_survey_file(self,filename:str, # Meta -- for convenience for now; best to migrate away from this + default_telescope = survey_data.Telescope() + for key in self.survey_data.params: DC = self.survey_data.params[key] - self.meta[key] = getattr(self.survey_data[DC],key) - + if DC == "telescope": + value = getattr(self.survey_data[DC],key) + if value == getattr(default_telescope,key): + # using default value - check if the FRBs have this + if key in frb_tbl.columns: + value = np.mean(frb_tbl[key]) + self.meta[key] = value + else: + self.meta[key] = getattr(self.survey_data[DC],key) + # Get default values from default frb data default_frb = survey_data.FRB() @@ -938,15 +949,16 @@ def process_survey_file(self,filename:str, for field in fields(default_frb):\ # checks to see if this is a field in metadata: if so, takes priority if survey_dict is not None and field.name in survey_dict.keys(): - default_vaue = survey_dict[field.name] + default_value = survey_dict[field.name] elif field.name in self.meta.keys(): default_value = self.meta[field.name] else: default_value = getattr(default_frb, field.name) + # now checks for missing data, fills with the default value if field.name in frb_tbl.columns: - - # iterate over fields, checking if they are populated + # iterate over fields, checking if they are populated. 
+ # only replaces values that are [] for i,val in enumerate(frb_tbl[field.name]): if isinstance(val,np.ma.core.MaskedArray): frb_tbl[field.name][i] = default_value @@ -1209,8 +1221,10 @@ def calc_max_dm(self): max_dmeg = max_dm - np.median(self.DMhalos + self.DMGs) max_idm = np.where(self.dmvals < max_dmeg)[0][-1] self.max_idm = max_idm + self.max_dmeg = max_dmeg else: self.max_idm = None + self.max_dmeg = None def get_efficiency_from_wlist(self,wlist,plist, model="Quadrature", diff --git a/zdm/survey_data.py b/zdm/survey_data.py index 35ff2b11..27b40931 100644 --- a/zdm/survey_data.py +++ b/zdm/survey_data.py @@ -299,6 +299,14 @@ class Observing(data_class.myDataClass): 'unit': 'pc/cm**3', 'Notation': '', }) + Z_FRACTION:str =field( + default=None, + metadata={'help':"Fraction of visible FRBs at a redshift"} + ) + Z_PHOTO:float =field( + default=0., + metadata={'help':"Gaussian photometric error on redshifts"} + ) class SurveyData(data_class.myData): """ Hold the SurveyData in a convenient object diff --git a/zdm/tests/test_energetics.py b/zdm/tests/test_energetics.py index 40256a2b..7bf39ccf 100644 --- a/zdm/tests/test_energetics.py +++ b/zdm/tests/test_energetics.py @@ -21,5 +21,3 @@ def test_init_gamma(): assert np.isclose(float(energetics.igamma_linear_log10[-1](0.)), float(energetics.igamma_linear[-1](1.)), rtol=1e-3) - -test_init_gamma() diff --git a/zdm/tests/test_path_prior.py b/zdm/tests/test_path_prior.py index b5edcadb..52e5ba1d 100644 --- a/zdm/tests/test_path_prior.py +++ b/zdm/tests/test_path_prior.py @@ -30,15 +30,23 @@ def test_path_priors(): state = parameters.State() cos.set_cosmology(state) cos.init_dist_measures() - model = opt.host_model() + model1 = opt.simple_host_model() + model2 = opt.marnoch_model() + model3 = opt.loudas_model() name='CRAFT_ICS_1300' ss,gs = loading.surveys_and_grids(survey_names=[name]) g = gs[0] s = ss[0] - # must be done once for any fixed zvals - model.init_zmapping(g.zvals) + # wrapper around the optical model. 
 For returning p(m_r|DM)
+    wrapper1 = opt.model_wrapper(model1,g.zvals) # simple
+    wrapper2 = opt.model_wrapper(model2,g.zvals) # marnoch
+    wrapper3 = opt.model_wrapper(model3,g.zvals) # loudas
+    # must be done once for any fixed zvals
+    wrapper1.init_zmapping(g.zvals)
+    wrapper2.init_zmapping(g.zvals)
+    wrapper3.init_zmapping(g.zvals)
 
     for frb in frblist:
         # interates over the FRBs. "Do FRB"
@@ -56,8 +64,8 @@
 
         DMEG = s.DMEGs[imatch]
 
-        prior = model.init_path_raw_prior_Oi(DMEG,g)
-        PU = model.estimate_unseen_prior(mag_limit=26) # might not be correct
+        wrapper1.init_path_raw_prior_Oi(DMEG,g)
+        PU = wrapper1.estimate_unseen_prior() # might not be correct
 
         # the model should have calculated a valid unseen probability
         if PU < 0. or PU > 1.:
@@ -66,11 +74,11 @@
         if not np.isfinite(PU):
             raise ValueError("Unseen probability PU is ",PU)
 
-        bad = np.where(prior < 0.)[0]
+        bad = np.where(wrapper1.priors < 0.)[0]
         if len(bad) > 0:
             raise ValueError("Some elements of model prior have negative probability")
 
-        OK = np.all(np.isfinite(prior))
+        OK = np.all(np.isfinite(wrapper1.priors))
         if not OK:
             raise ValueError("Some elements of magnitude priors are not finite")
diff --git a/zdm/tests/test_scat_methods.py b/zdm/tests/test_scat_methods.py
index 7c3471d1..c25fff55 100644
--- a/zdm/tests/test_scat_methods.py
+++ b/zdm/tests/test_scat_methods.py
@@ -1,6 +1,6 @@
 #import pytest
-from pkg_resources import resource_filename
+import importlib.resources as resources
 import os
 import pytest
 #import copy
@@ -46,7 +46,7 @@ def test_scat_methods():
 
     # Initialise survey and grid
     # For this purporse, we only need two different surveys
-    sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys')
+    sdir = resources.files('zdm').joinpath('data/Surveys')
     name = 'CRAFT/ICS892'
     s1,g1 = loading.surveys_and_grids( state_dict=vparam_dict1,