diff --git a/docs/conf.py b/docs/conf.py
index 8bde831..2d4b1a7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,7 +13,8 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
 import sys
-sys.path.insert(0, '..')
+
+sys.path.insert(0, "..")
 
 
 # -- Project information -----------------------------------------------------
@@ -23,7 +24,7 @@
 author = 'Mikkel Bue Lykkegaard, Sai-Aakash Ramesh, Louise Kluge, Ondřej Šimůnek, Jan Brezina'
 
 # The short X.Y version
-version = ''
+version = ""
 # The full version, including alpha/beta/rc tags
 release = '0.9.21'
 
@@ -38,36 +39,36 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.autosummary',
-    'numpydoc',
-    'sphinx.ext.mathjax',
-    'sphinx.ext.viewcode',
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "numpydoc",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.viewcode",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = None
@@ -78,7 +79,7 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'alabaster'
+html_theme = "alabaster"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
@@ -89,7 +90,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -105,7 +106,7 @@
 # -- Options for HTMLHelp output ---------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'tinyDAdoc'
+htmlhelp_basename = "tinyDAdoc"
 
 
 # -- Options for LaTeX output ------------------------------------------------
@@ -114,15 +115,12 @@
     # The paper size ('letterpaper' or 'a4paper').
     #
     # 'papersize': 'letterpaper',
-
    # The font size ('10pt', '11pt' or '12pt').
     #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #
     # 'preamble': '',
-
     # Latex figure (float) alignment
     #
     # 'figure_align': 'htbp',
@@ -132,8 +130,13 @@
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'tinyDA.tex', 'tinyDA Documentation',
-     'Mikkel Bue Lykkegaard', 'manual'),
+    (
+        master_doc,
+        "tinyDA.tex",
+        "tinyDA Documentation",
+        "Mikkel Bue Lykkegaard",
+        "manual",
+    ),
 ]
 
 
@@ -141,10 +144,7 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'tinyda', 'tinyDA Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, "tinyda", "tinyDA Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output ----------------------------------------------
@@ -153,9 +153,15 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'tinyDA', 'tinyDA Documentation',
-     author, 'tinyDA', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "tinyDA",
+        "tinyDA Documentation",
+        author,
+        "tinyDA",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
@@ -174,7 +180,7 @@
 # epub_uid = ''
 
 # A list of files that should not be packed into the epub file.
-epub_exclude_files = ['search.html']
+epub_exclude_files = ["search.html"]
 
 
 # -- Extension configuration -------------------------------------------------
diff --git a/tinyDA/chain.py b/tinyDA/chain.py
index 5ca4707..3347f6d 100644
--- a/tinyDA/chain.py
+++ b/tinyDA/chain.py
@@ -9,7 +9,6 @@
 
 
 class Chain:
-
     """Chain is a single level MCMC sampler. It is initialsed with a
     Posterior (which holds the model and the distributions, and returns
     Links), and a proposal (transition kernel).
@@ -95,7 +94,7 @@ def sample(self, iterations, progressbar=True):
         for i in pbar:
             if progressbar:
                 pbar.set_description(
-                    "Running chain, \u03B1 = %0.2f" % np.mean(self.accepted[-100:])
+                    "Running chain, \u03b1 = %0.2f" % np.mean(self.accepted[-100:])
                 )
 
             # draw a new proposal, given the previous parameters.
@@ -130,7 +129,6 @@ def sample(self, iterations, progressbar=True):
 
 
 class DAChain:
-
     """DAChain is a two-level Delayed Acceptance sampler. It takes a coarse
     and a fine posterior as input, as well as a proposal, which applies to
     the coarse level only.
@@ -164,7 +162,7 @@ class DAChain:
     promoted_coarse : list
         List of coarse states ("Links") that are promoted to the fine chain
     subchain_lengths : list
-        List of integers that correspond to the actual subchain length that was 
+        List of integers that correspond to the actual subchain length that was
         sampled randomly from a uniform distribution between 1 and subchain_length.
     chain_fine : list
         Samples ("Links") in the fine MCMC chain.
@@ -229,7 +227,7 @@ def __init__(
         self.posterior_fine = posterior_fine
         self.proposal = proposal
         self.subchain_length = subchain_length
-        self.randomize_subchain_length = randomize_subchain_length 
+        self.randomize_subchain_length = randomize_subchain_length
 
         # set up lists to hold coarse and fine links, as well as acceptance
         # accounting
@@ -298,7 +296,7 @@ def __init__(
                     self.model_diff, self.bias.get_sigma()
                 )
             else:
-                raise ValueError("Adaptive error model can only be state-dependent, state-independent or None.")
+                raise ValueError(
+                    "Adaptive error model can only be state-dependent, state-independent or None."
+                )
 
             self.chain_coarse[-1] = self.posterior_coarse.update_link(
                 self.chain_coarse[-1]
             )
@@ -309,19 +309,21 @@ def __init__(
 
         if self.randomize_subchain_length:
             if self.subchain_length == 1:
-                raise ValueError("Randomize subchain length requires a subchain_length > 1.")
+                raise ValueError(
+                    "Randomize subchain length requires a subchain_length > 1."
+                )
             if not self.store_coarse_chain:
-                raise ValueError("Randomize subchain length requires storing the coarse chain.")
-
+                raise ValueError(
+                    "Randomize subchain length requires storing the coarse chain."
+                )
+
         if self.randomize_subchain_length:
-            # this private method returns np.random.randint(-self.subsampling_rate,0)
+            # this private method returns np.random.randint(-self.subchain_length, 0)
             self._get_proposal_index = self._get_random_proposal_index
         else:
             # this private method always returns -1
             self._get_proposal_index = self._get_fixed_proposal_index
-
-
     def sample(self, iterations, progressbar=True):
@@ -342,7 +344,7 @@ def sample(self, iterations, progressbar=True):
         for i in pbar:
             if progressbar:
                 pbar.set_description(
-                    "Running chain, \u03B1_c = {0:.3f}, \u03B1_f = {1:.2f}".format(
+                    "Running chain, \u03b1_c = {0:.3f}, \u03b1_f = {1:.2f}".format(
                         np.mean(
                             self.accepted_coarse[-int(100 * self.subchain_length) :]
                         ),
@@ -357,9 +359,7 @@ def sample(self, iterations, progressbar=True):
             if sum(self.accepted_coarse[-self.subchain_length :]) == 0:
                 self.chain_fine.append(self.chain_fine[-1])
                 self.accepted_fine.append(False)
-                self.chain_coarse.append(
-                    self.chain_coarse[-(self.subchain_length + 1)]
-                )
+                self.chain_coarse.append(self.chain_coarse[-(self.subchain_length + 1)])
                 self.accepted_coarse.append(False)
                 self.is_coarse.append(False)
@@ -370,9 +370,9 @@ def sample(self, iterations, progressbar=True):
                 proposal_link_fine = self.posterior_fine.create_link(
                     self.chain_coarse[proposal_index].parameters
                 )
-                self.promoted_coarse.append(self.chain_coarse[proposal_index]) 
+                self.promoted_coarse.append(self.chain_coarse[proposal_index])
                 # add effective subchain lenght to list
-                self.subchain_lengths.append(proposal_index + self.subchain_length+1)
+                self.subchain_lengths.append(proposal_index + self.subchain_length + 1)
 
                 # compute the delayed acceptance probability.
                 if self.adaptive_error_model == "state-dependent":
@@ -445,7 +445,9 @@ def _sample_coarse(self):
 
     def _get_state_dependent_acceptance(self, proposal_link_fine):
         # compute the bias at the proposal.
-        bias_next = proposal_link_fine.model_output - self.promoted_coarse[-1].model_output
+        bias_next = (
+            proposal_link_fine.model_output - self.promoted_coarse[-1].model_output
+        )
 
         # create a throwaway link representing the reverse state.
         coarse_state_biased = self.posterior_coarse.update_link(
@@ -523,16 +525,14 @@ def _update_error_model(self):
         self.chain_coarse[-1] = self.posterior_coarse.update_link(self.chain_coarse[-1])
 
     def _get_random_proposal_index(self):
-        random_proposal_index = np.random.randint(-self.subchain_length,0)
+        random_proposal_index = np.random.randint(-self.subchain_length, 0)
         return random_proposal_index
-    
+
     def _get_fixed_proposal_index(self):
         return -1
 
-
 class MLDAChain:
-
     """MLDAChain is a Multilevel Delayed Acceptance sampler. It takes a list
     of posteriors of increasing level as input, as well as a proposal, which
     applies to the coarsest level only.
@@ -557,6 +557,10 @@ class MLDAChain:
         List of bool, signifying whether a proposal was accepted or not.
     adaptive_error_model : str or None
         The adaptive error model, see e.g. Cui et al. (2019).
+    randomize_subchain_length : bool, optional
+        Randomizes the subchain lengths, see e.g. Liu (2009). The sample to be
+        promoted is drawn from a uniform distribution between 1 and
+        subchain_length. Default is False.
     bias : tinaDA.RecursiveSampleMoments
         A recursive Gaussian error model that computes the sample moments of
         the next-coarser bias.
@@ -572,6 +576,7 @@ def __init__(
         posteriors,
         proposal,
         subchain_lengths,
+        randomize_subchain_length=False,
         initial_parameters=None,
         adaptive_error_model=None,
         store_coarse_chain=True,
@@ -597,13 +602,17 @@ def __init__(
         store_coarse_chain : bool, optional
             Whether to store the coarse chains. Disable if the sampler is
             taking up too much memory. Default is True.
+        randomize_subchain_length : bool, optional
+            Randomizes the subchain lengths, see e.g. Liu (2009). The sample
+            to be promoted is drawn from a uniform distribution between 1
+            and subchain_length. Default is False.
         """
 
         # internalise the finest posterior and set the level.
         self.posterior = posteriors[-1]
         self.level = len(posteriors) - 1
 
-        # set the furrent level subchain length.
+        # set the current level subchain length.
         self.subchain_length = subchain_lengths[-1]
 
         # initialise a list, which holds the links.
@@ -630,6 +639,9 @@ def __init__(
         # set whether to store the coarse chain
         self.store_coarse_chain = store_coarse_chain
 
+        # set whether to randomize subchain lengths
+        self.randomize_subchain_length = randomize_subchain_length
+
         # set the effective proposal to MLDA which runs on the next-coarser level.
         self.proposal = MLDA(
             posteriors[:-1],
@@ -638,6 +650,7 @@ def __init__(
             self.initial_parameters,
             self.adaptive_error_model,
             self.store_coarse_chain,
+            self.randomize_subchain_length,
         )
 
         # set up the adaptive error model.
@@ -665,7 +678,9 @@ def __init__(
             elif self.adaptive_error_model == "state-dependent":
                 pass
             else:
-                raise ValueError("Adaptive error model can only be state-dependent, state-independent or None.")
+                raise ValueError(
+                    "Adaptive error model can only be state-dependent, state-independent or None."
+                )
 
             # update the first coarser link with the adaptive error model.
             self.proposal.chain[-1] = self.proposal.posterior.update_link(
                 self.proposal.chain[-1]
             )
@@ -697,7 +712,7 @@ def sample(self, iterations, progressbar=True):
         for i in pbar:
             if progressbar:
                 pbar.set_description(
-                    "Running chain, \u03B1 = %0.2f" % np.mean(self.accepted[-100:])
+                    "Running chain, \u03b1 = %0.2f" % np.mean(self.accepted[-100:])
                 )
 
             # remove everything except the latest coarse link, if the coarse
diff --git a/tinyDA/diagnostics.py b/tinyDA/diagnostics.py
index 06cfd51..3636a7e 100644
--- a/tinyDA/diagnostics.py
+++ b/tinyDA/diagnostics.py
@@ -20,7 +20,7 @@ def to_inference_data(chain, level="fine", burnin=0, parameter_names=None):
     burnin : int, optional
         The burnin length. The default is 0.
     parameter_names : list, optional
-        List of the names of the parameters in the chain, in the same order 
+        List of the names of the parameters in the chain, in the same order
         as they appear in each link. Default is None, meaning that parameters
         will be named [x1, x2, ...].
@@ -149,8 +149,10 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0):
         "attribute": attribute,
     }
 
-    if attribute == 'stats':
-        getattribute = lambda link, attribute: np.array([link.prior, link.likelihood, link.posterior])
+    if attribute == "stats":
+        getattribute = lambda link, attribute: np.array(
+            [link.prior, link.likelihood, link.posterior]
+        )
     else:
         getattribute = lambda link, attribute: getattr(link, attribute)
@@ -159,7 +161,10 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0):
 
         # extract link attribute.
        for i in range(chain["n_chains"]):
             samples["chain_{}".format(i)] = np.array(
-                [getattribute(link, attribute) for link in chain["chain_{}".format(i)][burnin:]]
+                [
+                    getattribute(link, attribute)
+                    for link in chain["chain_{}".format(i)][burnin:]
+                ]
             )
 
     # if the input is a Delayed Acceptance chain.
@@ -177,7 +182,6 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0):
                 ]
             )
 
-
     # if the input is a Delayed Acceptance chain.
     elif chain["sampler"] == "MLDA":
         # copy the subchain length across.
@@ -207,3 +211,29 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0):
 
     # return the samples.
     return samples
+
+
+def MLDA_estimators(chain, attribute="qoi", variable="x0", burnin=0):
+    """Computes the unbiased Monte Carlo estimator for Multilevel Delayed Acceptance
+    chains, as derived in Lykkegaard et al. 2023.
+
+    Parameters
+    ----------
+    chain : dict
+        A dict as returned by tinyDA.sample, containing chain information
+        and lists of tinyDA.Link instances.
+    attribute : str, optional
+        Which link attribute ('parameters', 'model_output', 'qoi' or 'stats')
+        to extract. The default is 'qoi'.
+    variable : str, optional
+        Which variable of the posterior or qoi to marginalize over.
+    burnin : int, optional
+        The burnin length. The default is 0.
+
+    Returns
+    -------
+    float
+        Output of the estimator computation.
+    """
+
+    estimator = 0
+    return estimator
\ No newline at end of file
diff --git a/tinyDA/distributions.py b/tinyDA/distributions.py
index 6518f78..bdbda2b 100644
--- a/tinyDA/distributions.py
+++ b/tinyDA/distributions.py
@@ -6,7 +6,6 @@
 
 
 class JointPrior:
-
     """JointPrior is a wrapper for a list of priors, if the parameters have
     different types of priors. The order must match the order of parameters
     for the model, since parameters are unnamed.
@@ -107,7 +106,6 @@ def CompositePrior(*args, **kwargs):
 
 
 class PoissonPointProcess:
-
     """PoissonPointProcess is a geometric prior, where the number of points
     has a Poisson distribution and their locations are uniformly distributed
     on the domain. Additional geometric attributes of the points can also be
diff --git a/tinyDA/link.py b/tinyDA/link.py
index 152647f..37ca188 100644
--- a/tinyDA/link.py
+++ b/tinyDA/link.py
@@ -1,5 +1,4 @@
 class Link:
-
     """The Link class holds all relevant information about an MCMC sample,
     i.e. parameters, prior log-desnity, model output, log-likelihood and
     possibly a Quantity of Interest (QoI)
@@ -21,7 +20,6 @@ class Link:
     """
 
     def __init__(self, parameters, prior, model_output, likelihood, qoi=None):
-
         """
         Parameters
         ----------
diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py
index 5e8893c..850ca68 100644
--- a/tinyDA/proposal.py
+++ b/tinyDA/proposal.py
@@ -1344,6 +1344,7 @@ def __init__(
         initial_parameters,
         adaptive_error_model,
         store_coarse_chain,
+        randomize_subchain_length,
     ):
         """
         Parameters
@@ -1363,6 +1364,11 @@ def __init__(
             is None (no error model), options are 'state-independent' or
             'state-dependent'. If an error model is used, the likelihood
             MUST have a set_bias() method, use e.g. tinyDA.AdaptiveLogLike.
+        randomize_subchain_length : bool, optional
+            If True, the subchain length will be sampled from a uniform
+            distribution [1, subchain_length] at every level. This is
+            needed for computing the unbiased multilevel Monte Carlo
+            estimator (see Lykkegaard 2023). Default is False.
         """
 
         # internalise the current level posterior and set the level.
@@ -1374,11 +1380,13 @@ def __init__(
         self.chain = []
         self.accepted = []
         self.is_local = []
+        self.promoted = []
 
         # create a link from the initial parameters and write to the histories.
         self.chain.append(self.posterior.create_link(self.initial_parameters))
         self.accepted.append(True)
         self.is_local.append(False)
+        self.promoted.append(self.chain[-1])
 
         # set the adaptive error model as an attribute.
         self.adaptive_error_model = adaptive_error_model
@@ -1386,6 +1394,24 @@ def __init__(
         # set whether to store the coarse chain
         self.store_coarse_chain = store_coarse_chain
 
+        # set whether to randomize the subchain length
+        self.randomize_subchain_length = randomize_subchain_length
+
+        # check that storing the coarse chain is enabled in case of randomized subchain length
+        if self.randomize_subchain_length:
+            if not self.store_coarse_chain:
+                raise ValueError(
+                    "Randomize subchain length requires storing the coarse chain."
+                )
+
+        # set the proposal index method
+        if self.randomize_subchain_length:
+            # this private method returns np.random.randint(-subchain_length, 0)
+            self._get_proposal_index = self._get_random_proposal_index
+        else:
+            # this private method always returns -1
+            self._get_proposal_index = self._get_fixed_proposal_index
+
         # if this level is not the coarsest level.
         if self.level > 0:
             # internalise the subchain length.
@@ -1399,6 +1425,7 @@ def __init__(
                 self.initial_parameters,
                 self.adaptive_error_model,
                 self.store_coarse_chain,
+                self.randomize_subchain_length,
             )
 
             # set the current level make_proposal method to MLDA.
@@ -1408,7 +1435,7 @@ def __init__(
             if self.adaptive_error_model is not None:
                 # compute the difference between coarse and fine level.
                 self.model_diff = (
-                    self.chain[-1].model_output - self.proposal.chain[-1].model_output
+                    self.chain[-1].model_output - self.proposal.promoted[-1].model_output
                 )
 
                 # set up the state-independent adaptive error model.
@@ -1495,7 +1522,7 @@ def align_chain(self, parameters, accepted):
     def _reset_chain(self):
         # remove everything except the latest coarse link, if the coarse
         # chain shouldn't be stored.
-        self.chain = [self.chain[-1]]
+        self.chain = [self.chain[-self.proposal_index]]
 
         if self.level > 0:
             self.proposal._reset_chain()
@@ -1505,8 +1532,11 @@ def make_mlda_proposal(self, subchain_length):
         ----------
         subchain length : int
             The number of samples drawn in the subchain.
+        proposal_index : int
+            Index of the sample to be promoted from this subchain.
+            This is always -1 unless randomize_subchain_length is True.
         """
-
+        proposal_index = self._get_proposal_index(subchain_length)
         # iterate through the subsamples.
         for i in range(subchain_length):
             # create a proposal from the next-lower level,
@@ -1527,7 +1557,7 @@ def make_mlda_proposal(self, subchain_length):
             alpha = self.proposal.get_acceptance(
                 proposal_link,
                 self.chain[-1],
-                self.proposal.chain[-1],
+                self.proposal.chain[-1],  # this is the element forwarded by the subchain
                 self.proposal.chain[-(self.subchain_length + 1)],
             )
@@ -1576,12 +1606,14 @@ def make_mlda_proposal(self, subchain_length):
                 self.proposal.chain[-1] = self.proposal.posterior.update_link(
                     self.proposal.chain[-1]
                 )
-
+        self.promoted.append(self.chain[proposal_index])
         # return the latest link.
-        return self.chain[-1].parameters
+        return self.chain[proposal_index].parameters
 
     def make_base_proposal(self, subchain_length):
         # iterate through the subsamples.
+        proposal_index = self._get_proposal_index(subchain_length)
+
         for i in range(subchain_length):
             # draw a new proposal, given the previous parameters.
            proposal = self.proposal.make_proposal(self.chain[-1])
@@ -1609,8 +1641,9 @@ def make_base_proposal(self, subchain_length):
                 parameters_previous=self.chain[-2].parameters,
                 accepted=self.accepted,
             )
+        self.promoted.append(self.chain[proposal_index])
 
         # return the latest link.
-        return self.chain[-1].parameters
+        return self.chain[proposal_index].parameters
 
     def get_acceptance(
         self, proposal_link, previous_link, proposal_link_below, previous_link_below
@@ -1622,6 +1655,13 @@ def get_acceptance(
             + previous_link_below.posterior
             - proposal_link_below.posterior
         )
+
+    def _get_random_proposal_index(self, subchain_length):
+        random_proposal_index = np.random.randint(-subchain_length, 0)
+        return random_proposal_index
+
+    def _get_fixed_proposal_index(self, subchain_length):
+        return -1
 
 
 class DREAM(DREAMZ, SharedArchiveProposal):
@@ -1653,4 +1693,4 @@ def adapt(self, **kwargs):
 
     def make_proposal(self, link):
         Z = self.read_archive()
-        return super().make_proposal(link, Z)
+        return super().make_proposal(link, Z)
\ No newline at end of file
diff --git a/tinyDA/ray.py b/tinyDA/ray.py
index 243bbe7..8d7eeee 100644
--- a/tinyDA/ray.py
+++ b/tinyDA/ray.py
@@ -10,7 +10,6 @@
 
 
 class ParallelChain:
-
     """ParallelChain creates n_chains instances of tinyDA.Chain and runs the
     chains in parallel. It is initialsed with a Posterior (which holds the
     model and the distributions, and returns Links), and a proposal (transition
@@ -36,7 +35,7 @@ class ParallelChain:
         Runs the MCMC for the specified number of iterations.
     """
 
-    def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None):
+    def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None, randomize_subchain_length=False):
         """
         Parameters
         ----------
@@ -62,13 +61,15 @@ def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None):
         # set the initial parameters.
         self.initial_parameters = initial_parameters
 
+        self.randomize_subchain_length = randomize_subchain_length
+
         # initialise Ray.
         ray.init(ignore_reinit_error=True)
 
         # set up the parallel chains as Ray actors.
         self.remote_chains = [
             RemoteChain.remote(
-                self.posterior, self.proposal[i], self.initial_parameters[i]
+                self.posterior, self.proposal[i], self.initial_parameters[i], self.randomize_subchain_length
             )
             for i in range(self.n_chains)
         ]
@@ -150,6 +151,7 @@ def __init__(
         posteriors,
         proposal,
         subchain_lengths=None,
+        randomize_subchain_length=False,
         n_chains=2,
         initial_parameters=None,
         adaptive_error_model=None,
@@ -172,6 +174,8 @@ def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None):
         # whether to store the coarse chain.
         self.store_coarse_chain = store_coarse_chain
 
+        self.randomize_subchain_length = randomize_subchain_length
+
         # initialise Ray.
         ray.init(ignore_reinit_error=True)
@@ -181,6 +185,7 @@ def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None):
                 self.posteriors,
                 self.proposal[i],
                 self.subchain_lengths,
+                self.randomize_subchain_length,
                 self.initial_parameters[i],
                 self.adaptive_error_model,
                 self.store_coarse_chain,
@@ -211,7 +216,6 @@ def sample(self, iterations, progressbar):
 
 
 class MultipleTry(Proposal):
-
     """Multiple-Try proposal (Liu et al. 2000), which will take any other
     TinyDA proposal as a kernel. If the kernel is symmetric, it uses MTM(II),
     otherwise it uses MTM(I). The parameter k sets the number of tries.
diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py
index 050ebde..12c4815 100644
--- a/tinyDA/sampler.py
+++ b/tinyDA/sampler.py
@@ -79,6 +79,9 @@ def sample(
         the same subchain length will be used for all levels. If running
         single-level MCMC, this parameter is ignored.
        Default is 1, resulting in "classic" DA MCMC for a two-level model.
+    randomize_subchain_length : bool, optional
+        Randomizes the subchain length, as described in Lykkegaard et al.
+        (2023). Default is False.
     adaptive_error_model : str or None, optional
         The adaptive error model, see e.g. Cui et al. (2019). If running
         single-level MCMC, this parameter is ignored. Default is None
@@ -109,9 +112,10 @@ def sample(
         arviz.InferenceData object.
     """
-
     if subsampling_rate is not None:
-        warnings.warn(" subsampling_rate has been deprecated in favour of subchain_length.")
+        warnings.warn(
+            " subsampling_rate has been deprecated in favour of subchain_length."
+        )
         subchain_length = subsampling_rate
 
     # get the availability flag.
@@ -272,6 +276,7 @@ def sample(
             n_chains,
             initial_parameters,
             subchain_lengths,
+            randomize_subchain_length,
             adaptive_error_model,
             store_coarse_chain,
         )
@@ -284,6 +289,7 @@ def sample(
             n_chains,
             initial_parameters,
             subchain_lengths,
+            randomize_subchain_length,
             adaptive_error_model,
             store_coarse_chain,
             force_progress_bar,
@@ -316,17 +322,20 @@ def _sample_parallel(
     n_chains,
     initial_parameters,
     force_progress_bar,
+    randomize_subchain_length,
 ):
     """Helper function for tinyDA.sample()"""
 
     print("Sampling {} chains in parallel".format(n_chains))
 
     # create a parallel sampling instance and sample.
-    chains = ParallelChain(posteriors[0], proposal, n_chains, initial_parameters)
+    chains = ParallelChain(posteriors[0], proposal, n_chains, initial_parameters, randomize_subchain_length)
     chains.sample(iterations, force_progress_bar)
 
     info = {"sampler": "MH", "n_chains": n_chains, "iterations": iterations + 1}
-    chains = {"chain_{}".format(i): chain.chain for i, chain in enumerate(chains.chains)}
+    chains = {
+        "chain_{}".format(i): chain.chain for i, chain in enumerate(chains.chains)
+    }
 
     # return the samples.
     return {**info, **chains}
@@ -403,6 +412,7 @@ def _sample_parallel_da(
 
     return result
 
+
 def _get_result_da(
     chains,
     iterations,
@@ -438,6 +448,7 @@ def _get_result_da(
 
     # return eveything.
    return {**info, **chains_coarse, **chains_fine}
 
+
 def _sample_sequential_mlda(
     posteriors,
     proposal,
@@ -445,6 +456,7 @@ def _sample_sequential_mlda(
     n_chains,
     initial_parameters,
     subchain_lengths,
+    randomize_subchain_length,
     adaptive_error_model,
     store_coarse_chain,
 ):
@@ -461,6 +473,7 @@ def _sample_sequential_mlda(
                 posteriors,
                 proposal[i],
                 subchain_lengths,
+                randomize_subchain_length,
                 initial_parameters[i],
                 adaptive_error_model,
                 store_coarse_chain,
             )
         )
         chains[i].sample(iterations)
 
-    result = _get_result_mlda(chains, levels, iterations, subchain_lengths, store_coarse_chain)
+    result = _get_result_mlda(
+        chains, levels, iterations, subchain_lengths, randomize_subchain_length, store_coarse_chain,
+    )
 
     return result
@@ -480,6 +495,7 @@ def _sample_parallel_mlda(
     n_chains,
     initial_parameters,
     subchain_lengths,
+    randomize_subchain_length,
     adaptive_error_model,
     store_coarse_chain,
     force_progress_bar,
@@ -495,6 +511,7 @@ def _sample_parallel_mlda(
         posteriors,
         proposal,
         subchain_lengths,
+        randomize_subchain_length,
         n_chains,
         initial_parameters,
         adaptive_error_model,
@@ -503,16 +520,21 @@ def _sample_parallel_mlda(
     parallel_chain.sample(iterations, force_progress_bar)
     chains = parallel_chain.chains
 
-    result = _get_result_mlda(chains, levels, iterations, subchain_lengths, store_coarse_chain)
+    result = _get_result_mlda(
+        chains, levels, iterations, subchain_lengths, randomize_subchain_length, store_coarse_chain,
+    )
 
     return result
 
+
 def _get_result_mlda(
     chains,
     levels,
     iterations,
     subchain_lengths,
+    randomize_subchain_length,
     store_coarse_chain,
 ):
 
     info = {
@@ -521,6 +543,7 @@
         "iterations": iterations + 1,
         "levels": levels,
         "subchain_lengths": subchain_lengths,
+        "randomize_subchain_length": randomize_subchain_length,
     }
 
     # collect and return the samples.
@@ -537,11 +560,20 @@
                 "chain_l{}_{}".format(i, j): list(compress(chain.chain, chain.is_local))
                 for j, chain in enumerate(_current)
             }
+            promoted_current = {
+                "promoted_l{}_{}".format(i, j): chain.promoted
+                for j, chain in enumerate(_current)
+            }
         else:
             chains_current = {
                 "chain_l{}_{}".format(i, j): None for j, chain in enumerate(_current)
             }
-        chains_all = {**chains_all, **chains_current}
+            promoted_current = {
+                "promoted_l{}_{}".format(i, j): None for j, chain in enumerate(_current)
+            }
+        chains_all = {**chains_all, **chains_current, **promoted_current}
 
         _current = [chain.proposal for chain in _current]
 
     return {**info, **chains_all}
diff --git a/tinyDA/umbridge.py b/tinyDA/umbridge.py
index 16e958c..7118264 100644
--- a/tinyDA/umbridge.py
+++ b/tinyDA/umbridge.py
@@ -1,7 +1,7 @@
 import numpy as np
 
 
-class UmBridgeModel: 
+class UmBridgeModel:
 
     """UmBridgeModel provides a wrapper for an UM-Bridge HTTPModel, which
     allows for using UM-Bridge forward operators directly in a tinyDA
     BlackBoxLinkFactory.
@@ -22,7 +22,6 @@ class UmBridgeModel:
     """
 
     def __init__(self, umbridge_model, pre=None, umbridge_config={}):
-
         """
         Parameters
         ----------
@@ -54,7 +53,6 @@ def __init__(self, umbridge_model, pre=None, umbridge_config={}):
         self.umbridge_config = umbridge_config
 
     def __call__(self, parameters):
-
         """
         Parameters
         ----------
@@ -86,7 +84,9 @@ def _gradient(self, parameters, sensitivity):
         umbridge_sens = sensitivity.tolist()
 
         # send converted model input the the UM-Bridge model.
-        umbridge_gradient = self.umbridge_model.gradient(0, 0, umbridge_input, umbridge_sens, self.umbridge_config)
+        umbridge_gradient = self.umbridge_model.gradient(
+            0, 0, umbridge_input, umbridge_sens, self.umbridge_config
+        )
 
         # convert the UM-Bridge output back to a NumPy array.
         gradient = np.array(umbridge_gradient).flatten()
diff --git a/tinyDA/utils.py b/tinyDA/utils.py
index ca7cbcc..8bf31e9 100644
--- a/tinyDA/utils.py
+++ b/tinyDA/utils.py
@@ -7,7 +7,6 @@
 
 
 class RecursiveSampleMoments:
-
     """Iteratively constructs a sample mean and covariance, given input samples.
     Used to capture an estimate of the mean and covariance of the bias of an
     MLDA coarse model, and for the Adaptive Metropolis (AM) proposal.
@@ -125,7 +124,6 @@ def update(self, x):
 
 
 class ZeroMeanRecursiveSampleMoments(RecursiveSampleMoments):
-
    """Iteratively constructs a sample covariance, with zero mean given input
    samples. It is a specialised version of RecursiveSampleMoments, used only
    in the state dependent error model.
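
Reviewer note: a minimal usage sketch of the new randomize_subchain_length flag, for trying the branch out. It assumes tinyDA's existing Posterior, GaussianLogLike, GaussianRandomWalk and sample APIs; the three toy forward models, the data and the noise level below are invented for illustration, and MLDA_estimators is still a stub in this diff, so the sketch only inspects the promoted_l*_* entries of the returned dict.

import numpy as np
import scipy.stats as stats
import tinyDA as tda

# toy three-level forward models standing in for a real solver hierarchy (made up).
model_l0 = lambda x: np.array([x[0] + x[1]])
model_l1 = lambda x: np.array([x[0] + 1.01 * x[1]])
model_l2 = lambda x: np.array([x[0] + 1.001 * x[1]])

# prior, synthetic data and likelihood (illustrative values only).
prior = stats.multivariate_normal(np.zeros(2), np.eye(2))
data = np.array([1.0])
likelihood = tda.GaussianLogLike(data, 0.01 * np.eye(1))

posteriors = [
    tda.Posterior(prior, likelihood, model_l0),
    tda.Posterior(prior, likelihood, model_l1),
    tda.Posterior(prior, likelihood, model_l2),
]
proposal = tda.GaussianRandomWalk(np.eye(2), scaling=0.2)

# randomize_subchain_length=True promotes a uniformly random element of each
# coarse subchain instead of the last one, which is what the unbiased MLDA
# estimator (MLDA_estimators, once implemented) expects.
samples = tda.sample(
    posteriors,
    proposal,
    iterations=2000,
    n_chains=2,
    subchain_length=10,
    randomize_subchain_length=True,
)

# the promoted coarse links are now exported per level and per chain.
print([key for key in samples if key.startswith("promoted_")])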