From 3febf9a66638082d965bfa6bdd8e337bfe404553 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Thu, 19 Dec 2024 18:01:57 +0100 Subject: [PATCH 01/21] Necessary facilities for randomize_subchain_length in the MLDA proposal. --- tinyDA/proposal.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index a4e72dd..9d538dd 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1316,6 +1316,7 @@ def __init__( initial_parameters, adaptive_error_model, store_coarse_chain, + randomize_subchain_length, ): """ Parameters @@ -1335,6 +1336,11 @@ def __init__( is None (no error model), options are 'state-independent' or 'state-dependent'. If an error model is used, the likelihood MUST have a set_bias() method, use e.g. tinyDA.AdaptiveLogLike. + radomize_subchain_length : bool, default is false. + If set "True", the subchain lenght will be sampled from a + uniform distribution [1, subchain length] at every level. This + is needed for computing the unbiased multilevel Monte Carlo + estimator (see Lykkegaard 2023). """ # internalise the current level posterior and set the level. @@ -1346,6 +1352,7 @@ def __init__( self.chain = [] self.accepted = [] self.is_local = [] + self.promoted = [] # create a link from the initial parameters and write to the histories. self.chain.append(self.posterior.create_link(self.initial_parameters)) @@ -1358,9 +1365,12 @@ def __init__( # set whether to store the coarse chain self.store_coarse_chain = store_coarse_chain + # set whether to randomize the subchain length + self.randomize_subchain_lenght = randomize_subchain_length + # if this level is not the coarsest level. if self.level > 0: - # internalise the subchain length. + # internalise the subchain length. If randomize_subchain_lenght self.subchain_length = subchain_lengths[-1] # set MDLA as the proposal on the next-coarser level. 
@@ -1371,6 +1381,7 @@ def __init__( self.initial_parameters, self.adaptive_error_model, self.store_coarse_chain, + self.randomize_subchain_lenght, ) # set the current level make_proposal method to MLDA. @@ -1594,3 +1605,5 @@ def get_acceptance( + previous_link_below.posterior - proposal_link_below.posterior ) + + From 08a30f83fc3e3f65777879fccfbf5a967ef8b9e5 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 18 Mar 2025 17:07:39 +0100 Subject: [PATCH 02/21] put in checks for store_coarse_chain, started to modify make_mlda_proposal --- tinyDA/proposal.py | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index 9d538dd..cafb813 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1337,9 +1337,9 @@ def __init__( 'state-dependent'. If an error model is used, the likelihood MUST have a set_bias() method, use e.g. tinyDA.AdaptiveLogLike. radomize_subchain_length : bool, default is false. - If set "True", the subchain lenght will be sampled from a - uniform distribution [1, subchain length] at every level. This - is needed for computing the unbiased multilevel Monte Carlo + If set "True", the subchain lenght will be sampled from a + uniform distribution [1, subchain length] at every level. This + is needed for computing the unbiased multilevel Monte Carlo estimator (see Lykkegaard 2023). """ @@ -1368,12 +1368,27 @@ def __init__( # set whether to randomize the subchain length self.randomize_subchain_lenght = randomize_subchain_length + # check that store coarse chain is on in case of randomized subchain length + if self.randomize_subchain_lenght: + if not self.store_coarse_chain: + raise ValueError( + "Randomize subchain length requires storing the coarse chain." + ) + # if this level is not the coarsest level. if self.level > 0: - # internalise the subchain length. If randomize_subchain_lenght + # internalise the subchain length. 
self.subchain_length = subchain_lengths[-1] - # set MDLA as the proposal on the next-coarser level. + # set proposal index + if self.randomize_subchain_length: + # this private method returns np.random.randint(-self.subchain_length,0) + self._get_proposal_index = self._get_random_proposal_index + else: + # this private method always returns -1 + self._get_proposal_index = self._get_fixed_proposal_index + # set MDLA as the proposal on the next-coarser level. + self.proposal = MLDA( posteriors[:-1], proposal, @@ -1482,12 +1497,15 @@ def _reset_chain(self): if self.level > 0: self.proposal._reset_chain() - def make_mlda_proposal(self, subchain_length): + def make_mlda_proposal(self, subchain_length, proposal_index): """ Parameters ---------- subchain length : int The number of samples drawn in the subchain. + proposal index : int + Index of the sample to be promoted in this subchain. + This only differs from subchain_length if randomize_subchain_length is true. """ # iterate through the subsamples. @@ -1606,4 +1624,9 @@ def get_acceptance( - proposal_link_below.posterior ) - + def _get_random_proposal_index(self): + random_proposal_index = np.random.randint(-self.subchain_length, 0) + return random_proposal_index + + def _get_fixed_proposal_index(self): + return -1 \ No newline at end of file From 34b598dd37f45ffcf79e7af9bc36c5a18d868c3f Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 18 Mar 2025 17:08:57 +0100 Subject: [PATCH 03/21] local changes --- docs/conf.py | 70 +++++++++++++++++++++++++---------------------- tinyDA/ray.py | 2 -- tinyDA/sampler.py | 20 ++++++++++---- 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index e445cb2..66050a5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,19 +13,20 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import sys -sys.path.insert(0, '..') + +sys.path.insert(0, "..") # -- Project information ----------------------------------------------------- -project = 'tinyDA' -copyright = '2021, Mikkel Bue Lykkegaard' -author = 'Mikkel Bue Lykkegaard, Sai-Aakash Ramesh, Louise Kluge' +project = "tinyDA" +copyright = "2021, Mikkel Bue Lykkegaard" +author = "Mikkel Bue Lykkegaard, Sai-Aakash Ramesh, Louise Kluge" # The short X.Y version -version = '' +version = "" # The full version, including alpha/beta/rc tags -release = '0.9.20' +release = "0.9.20" # -- General configuration --------------------------------------------------- @@ -38,36 +39,36 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'numpydoc', - 'sphinx.ext.mathjax', - 'sphinx.ext.viewcode', + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "numpydoc", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None @@ -78,7 +79,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -89,7 +90,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. @@ -105,7 +106,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'tinyDAdoc' +htmlhelp_basename = "tinyDAdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -114,15 +115,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -132,8 +130,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'tinyDA.tex', 'tinyDA Documentation', - 'Mikkel Bue Lykkegaard', 'manual'), + ( + master_doc, + "tinyDA.tex", + "tinyDA Documentation", + "Mikkel Bue Lykkegaard", + "manual", + ), ] @@ -141,10 +144,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'tinyda', 'tinyDA Documentation', - [author], 1) -] +man_pages = [(master_doc, "tinyda", "tinyDA Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -153,9 +153,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'tinyDA', 'tinyDA Documentation', - author, 'tinyDA', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "tinyDA", + "tinyDA Documentation", + author, + "tinyDA", + "One line description of project.", + "Miscellaneous", + ), ] @@ -174,7 +180,7 @@ # epub_uid = '' # A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] +epub_exclude_files = ["search.html"] # -- Extension configuration ------------------------------------------------- diff --git a/tinyDA/ray.py b/tinyDA/ray.py index 2980a68..2f5c09b 100644 --- a/tinyDA/ray.py +++ b/tinyDA/ray.py @@ -10,7 +10,6 @@ class ParallelChain: - """ParallelChain creates n_chains instances of tinyDA.Chain and runs the chains in parallel. It is initialsed with a Posterior (which holds the model and the distributions, and returns Links), and a proposal (transition @@ -211,7 +210,6 @@ def sample(self, iterations, progressbar): class MultipleTry(Proposal): - """Multiple-Try proposal (Liu et al. 2000), which will take any other TinyDA proposal as a kernel. If the kernel is symmetric, it uses MTM(II), otherwise it uses MTM(I). The parameter k sets the number of tries. diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py index 375abbf..9d73db2 100644 --- a/tinyDA/sampler.py +++ b/tinyDA/sampler.py @@ -109,9 +109,10 @@ def sample( arviz.InferenceData object. """ - if subsampling_rate is not None: - warnings.warn(" subsampling_rate has been deprecated in favour of subchain_length.") + warnings.warn( + " subsampling_rate has been deprecated in favour of subchain_length." 
+ ) subchain_length = subsampling_rate # get the availability flag. @@ -315,7 +316,9 @@ def _sample_parallel( chains.sample(iterations, force_progress_bar) info = {"sampler": "MH", "n_chains": n_chains, "iterations": iterations + 1} - chains = {"chain_{}".format(i): chain.chain for i, chain in enumerate(chains.chains)} + chains = { + "chain_{}".format(i): chain.chain for i, chain in enumerate(chains.chains) + } # return the samples. return {**info, **chains} @@ -392,6 +395,7 @@ def _sample_parallel_da( return result + def _get_result_da( chains, iterations, @@ -427,6 +431,7 @@ def _get_result_da( # return eveything. return {**info, **chains_coarse, **chains_fine} + def _sample_sequential_mlda( posteriors, proposal, @@ -457,7 +462,9 @@ def _sample_sequential_mlda( ) chains[i].sample(iterations) - result = _get_result_mlda(chains, levels, iterations, subchain_lengths, store_coarse_chain) + result = _get_result_mlda( + chains, levels, iterations, subchain_lengths, store_coarse_chain + ) return result @@ -492,10 +499,13 @@ def _sample_parallel_mlda( parallel_chain.sample(iterations, force_progress_bar) chains = parallel_chain.chains - result = _get_result_mlda(chains, levels, iterations, subchain_lengths, store_coarse_chain) + result = _get_result_mlda( + chains, levels, iterations, subchain_lengths, store_coarse_chain + ) return result + def _get_result_mlda( chains, levels, From eb0b26e660435c2265f6eabe2d415c134d5ba8ff Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 18 Mar 2025 17:20:23 +0100 Subject: [PATCH 04/21] fix merge issue --- tinyDA/chain.py | 52 +++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/tinyDA/chain.py b/tinyDA/chain.py index 5ca4707..eddbfab 100644 --- a/tinyDA/chain.py +++ b/tinyDA/chain.py @@ -9,7 +9,6 @@ class Chain: - """Chain is a single level MCMC sampler. 
It is initialsed with a Posterior (which holds the model and the distributions, and returns Links), and a proposal (transition kernel). @@ -95,7 +94,7 @@ def sample(self, iterations, progressbar=True): for i in pbar: if progressbar: pbar.set_description( - "Running chain, \u03B1 = %0.2f" % np.mean(self.accepted[-100:]) + "Running chain, \u03b1 = %0.2f" % np.mean(self.accepted[-100:]) ) # draw a new proposal, given the previous parameters. @@ -130,7 +129,6 @@ def sample(self, iterations, progressbar=True): class DAChain: - """DAChain is a two-level Delayed Acceptance sampler. It takes a coarse and a fine posterior as input, as well as a proposal, which applies to the coarse level only. @@ -164,7 +162,7 @@ class DAChain: promoted_coarse : list List of coarse states ("Links") that are promoted to the fine chain subchain_lengths : list - List of integers that correspond to the actual subchain length that was + List of integers that correspond to the actual subchain length that was sampled randomly from a uniform distribution between 1 and subchain_length. chain_fine : list Samples ("Links") in the fine MCMC chain. @@ -229,7 +227,7 @@ def __init__( self.posterior_fine = posterior_fine self.proposal = proposal self.subchain_length = subchain_length - self.randomize_subchain_length = randomize_subchain_length + self.randomize_subchain_length = randomize_subchain_length # set up lists to hold coarse and fine links, as well as acceptance # accounting @@ -298,7 +296,9 @@ def __init__( self.model_diff, self.bias.get_sigma() ) else: - raise ValueError("Adaptive error model can only be state-dependent, state-independent or None.") + raise ValueError( + "Adaptive error model can only be state-dependent, state-independent or None." 
+ ) self.chain_coarse[-1] = self.posterior_coarse.update_link( self.chain_coarse[-1] @@ -309,19 +309,21 @@ def __init__( if self.randomize_subchain_length: if self.subchain_length == 1: - raise ValueError("Randomize subchain length requires a subchain_length > 1.") + raise ValueError( + "Randomize subchain length requires a subchain_length > 1." + ) if not self.store_coarse_chain: - raise ValueError("Randomize subchain length requires storing the coarse chain.") - + raise ValueError( + "Randomize subchain length requires storing the coarse chain." + ) + if self.randomize_subchain_length: - # this private method returns np.random.randint(-self.subsampling_rate,0) + # this private method returns np.random.randint(-self.subchain_length,0) self._get_proposal_index = self._get_random_proposal_index else: # this private method always returns -1 self._get_proposal_index = self._get_fixed_proposal_index - - def sample(self, iterations, progressbar=True): """ Parameters @@ -342,7 +344,7 @@ def sample(self, iterations, progressbar=True): for i in pbar: if progressbar: pbar.set_description( - "Running chain, \u03B1_c = {0:.3f}, \u03B1_f = {1:.2f}".format( + "Running chain, \u03b1_c = {0:.3f}, \u03b1_f = {1:.2f}".format( np.mean( self.accepted_coarse[-int(100 * self.subchain_length) :] ), @@ -357,9 +359,7 @@ def sample(self, iterations, progressbar=True): if sum(self.accepted_coarse[-self.subchain_length :]) == 0: self.chain_fine.append(self.chain_fine[-1]) self.accepted_fine.append(False) - self.chain_coarse.append( - self.chain_coarse[-(self.subchain_length + 1)] - ) + self.chain_coarse.append(self.chain_coarse[-(self.subchain_length + 1)]) self.accepted_coarse.append(False) self.is_coarse.append(False) @@ -370,9 +370,9 @@ def sample(self, iterations, progressbar=True): proposal_link_fine = self.posterior_fine.create_link( self.chain_coarse[proposal_index].parameters ) - self.promoted_coarse.append(self.chain_coarse[proposal_index]) + 
self.promoted_coarse.append(self.chain_coarse[proposal_index]) # add effective subchain lenght to list - self.subchain_lengths.append(proposal_index + self.subchain_length+1) + self.subchain_lengths.append(proposal_index + self.subchain_length + 1) # compute the delayed acceptance probability. if self.adaptive_error_model == "state-dependent": @@ -445,7 +445,9 @@ def _sample_coarse(self): def _get_state_dependent_acceptance(self, proposal_link_fine): # compute the bias at the proposal. - bias_next = proposal_link_fine.model_output - self.promoted_coarse[-1].model_output + bias_next = ( + proposal_link_fine.model_output - self.promoted_coarse[-1].model_output + ) # create a throwaway link representing the reverse state. coarse_state_biased = self.posterior_coarse.update_link( @@ -523,16 +525,14 @@ def _update_error_model(self): self.chain_coarse[-1] = self.posterior_coarse.update_link(self.chain_coarse[-1]) def _get_random_proposal_index(self): - random_proposal_index = np.random.randint(-self.subchain_length,0) + random_proposal_index = np.random.randint(-self.subchain_length, 0) return random_proposal_index - + def _get_fixed_proposal_index(self): return -1 - class MLDAChain: - """MLDAChain is a Multilevel Delayed Acceptance sampler. It takes a list of posteriors of increasing level as input, as well as a proposal, which applies to the coarsest level only. @@ -665,7 +665,9 @@ def __init__( elif self.adaptive_error_model == "state-dependent": pass else: - raise ValueError("Adaptive error model can only be state-dependent, state-independent or None.") + raise ValueError( + "Adaptive error model can only be state-dependent, state-independent or None." + ) # update the first coarser link with the adaptive error model. 
self.proposal.chain[-1] = self.proposal.posterior.update_link( self.proposal.chain[-1] @@ -697,7 +699,7 @@ def sample(self, iterations, progressbar=True): for i in pbar: if progressbar: pbar.set_description( - "Running chain, \u03B1 = %0.2f" % np.mean(self.accepted[-100:]) + "Running chain, \u03b1 = %0.2f" % np.mean(self.accepted[-100:]) ) # remove everything except the latest coarse link, if the coarse From db7459c3abbc08699e7d04ee7127d0351ca61ea4 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 18 Mar 2025 19:00:26 +0100 Subject: [PATCH 05/21] defined _get_proposal_index --- tinyDA/proposal.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index fef480b..7564b25 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1651,6 +1651,13 @@ def get_acceptance( + previous_link_below.posterior - proposal_link_below.posterior ) + + def _get_random_proposal_index(self): + random_proposal_index = np.random.randint(-self.subchain_length, 0) + return random_proposal_index + + def _get_fixed_proposal_index(self): + return -1 class DREAM(DREAMZ, SharedArchiveProposal): @@ -1682,12 +1689,4 @@ def adapt(self, **kwargs): def make_proposal(self, link): Z = self.read_archive() - return super().make_proposal(link, Z) - - - def _get_random_proposal_index(self): - random_proposal_index = np.random.randint(-self.subchain_length, 0) - return random_proposal_index - - def _get_fixed_proposal_index(self): - return -1 \ No newline at end of file + return super().make_proposal(link, Z) \ No newline at end of file From 740080474120758de9216049e62ea867d85e7a69 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Wed, 19 Mar 2025 11:44:25 +0100 Subject: [PATCH 06/21] modified make_mlda_proposal and make_base_proposal to return a proposal according to proposal_index --- tinyDA/proposal.py | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/tinyDA/proposal.py 
b/tinyDA/proposal.py index 7564b25..063b143 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1394,29 +1394,29 @@ def __init__( self.store_coarse_chain = store_coarse_chain # set whether to randomize the subchain length - self.randomize_subchain_lenght = randomize_subchain_length + self.randomize_subchain_length = randomize_subchain_length # check that store coarse chain is on in case of randomized subchain length - if self.randomize_subchain_lenght: + if self.randomize_subchain_length: if not self.store_coarse_chain: raise ValueError( "Randomize subchain length requires storing the coarse chain." ) + + # set proposal index + if self.randomize_subchain_length: + # this private method returns np.random.randint(-self.subchain_length,0) + self._get_proposal_index = self._get_random_proposal_index + else: + # this private method always returns -1 + self._get_proposal_index = self._get_fixed_proposal_index # if this level is not the coarsest level. if self.level > 0: # internalise the subchain length. self.subchain_length = subchain_lengths[-1] - # set proposal index - if self.randomize_subchain_length: - # this private method returns np.random.randint(-self.subchain_length,0) - self._get_proposal_index = self._get_random_proposal_index - else: - # this private method always returns -1 - self._get_proposal_index = self._get_fixed_proposal_index - # set MDLA as the proposal on the next-coarser level. - + # set MDLA as the proposal on the next-coarser level. self.proposal = MLDA( posteriors[:-1], proposal, @@ -1424,7 +1424,7 @@ def __init__( self.initial_parameters, self.adaptive_error_model, self.store_coarse_chain, - self.randomize_subchain_lenght, + self.randomize_subchain_length, ) # set the current level make_proposal method to MLDA. 
@@ -1525,7 +1525,7 @@ def _reset_chain(self): if self.level > 0: self.proposal._reset_chain() - def make_mlda_proposal(self, subchain_length, proposal_index): + def make_mlda_proposal(self, subchain_length): """ Parameters ---------- @@ -1535,7 +1535,7 @@ def make_mlda_proposal(self, subchain_length, proposal_index): Index of the sample to be promoted in this subchain. This only differs from subchain_length if randomize_subchain_length is true. """ - + proposal_index = self._get_proposal_index() # iterate through the subsamples. for i in range(subchain_length): # create a proposal from the next-lower level, @@ -1605,12 +1605,14 @@ def make_mlda_proposal(self, subchain_length, proposal_index): self.proposal.chain[-1] = self.proposal.posterior.update_link( self.proposal.chain[-1] ) - + # return the latest link. - return self.chain[-1].parameters + return self.chain[-proposal_index].parameters def make_base_proposal(self, subchain_length): # iterate through the subsamples. + proposal_index = self._get_proposal_index() + for i in range(subchain_length): # draw a new proposal, given the previous parameters. proposal = self.proposal.make_proposal(self.chain[-1]) @@ -1638,8 +1640,9 @@ def make_base_proposal(self, subchain_length): parameters_previous=self.chain[-2].parameters, accepted=self.accepted, ) + # return the latest link. 
- return self.chain[-1].parameters + return self.chain[-proposal_index].parameters def get_acceptance( self, proposal_link, previous_link, proposal_link_below, previous_link_below From ffd11d401ab1d7991e92b3e5df75f23d79a9d6b1 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Thu, 20 Mar 2025 16:09:16 +0100 Subject: [PATCH 07/21] randomize_subchain_lengths in chain.py --- tinyDA/chain.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tinyDA/chain.py b/tinyDA/chain.py index eddbfab..c5208e7 100644 --- a/tinyDA/chain.py +++ b/tinyDA/chain.py @@ -557,6 +557,10 @@ class MLDAChain: List of bool, signifying whether a proposal was accepted or not. adaptive_error_model : str or None The adaptive error model, see e.g. Cui et al. (2019). + randomize_subchain_length : bool, optional + Randomizes the subchain lengths, see e.g. Liu (2009). Sample to be promoted + is drawn from uniform distribution, between 1 and subchain_length. + Default is False. bias : tinaDA.RecursiveSampleMoments A recursive Gaussian error model that computes the sample moments of the next-coarser bias. @@ -575,6 +579,7 @@ def __init__( initial_parameters=None, adaptive_error_model=None, store_coarse_chain=True, + randomize_subchain_length=False, ): """ Parameters @@ -597,13 +602,17 @@ def __init__( store_coarse_chain : bool, optional Whether to store the coarse chains. Disable if the sampler is taking up too much memory. Default is True. + randomize_subchain_length : bool, optional + Randomizes the subchain lengths, see e.g. Liu (2009). Sample + to be promoted is drawn from uniform distribution, between 1 + and subchain_length. Default is False. """ # internalise the finest posterior and set the level. self.posterior = posteriors[-1] self.level = len(posteriors) - 1 - # set the furrent level subchain length. + # set the current level subchain length. self.subchain_length = subchain_lengths[-1] # initialise a list, which holds the links. 
@@ -630,6 +639,9 @@ def __init__( # set whether to store the coarse chain self.store_coarse_chain = store_coarse_chain + # set wether to randomize subchain lengths + self.randomize_subchain_length = randomize_subchain_length + # set the effective proposal to MLDA which runs on the next-coarser level. self.proposal = MLDA( posteriors[:-1], @@ -638,6 +650,7 @@ def __init__( self.initial_parameters, self.adaptive_error_model, self.store_coarse_chain, + self.randomize_subchain_length ) # set up the adaptive error model. From 32cdaf76af09103fc669caac117184af0e467d5f Mon Sep 17 00:00:00 2001 From: louisekluge Date: Thu, 20 Mar 2025 16:22:07 +0100 Subject: [PATCH 08/21] collecting promoted samples on every level in a list --- tinyDA/proposal.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index 063b143..efb31cb 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1556,7 +1556,7 @@ def make_mlda_proposal(self, subchain_length): alpha = self.proposal.get_acceptance( proposal_link, self.chain[-1], - self.proposal.chain[-1], + self.proposal.chain[-1], # this is the element forwarded by the subchain self.proposal.chain[-(self.subchain_length + 1)], ) @@ -1605,9 +1605,9 @@ def make_mlda_proposal(self, subchain_length): self.proposal.chain[-1] = self.proposal.posterior.update_link( self.proposal.chain[-1] ) - + self.promoted.append(self.chain[proposal_index]) # return the latest link. - return self.chain[-proposal_index].parameters + return self.chain[proposal_index].parameters def make_base_proposal(self, subchain_length): # iterate through the subsamples. @@ -1640,9 +1640,9 @@ def make_base_proposal(self, subchain_length): parameters_previous=self.chain[-2].parameters, accepted=self.accepted, ) - + self.promoted.append(self.chain[proposal_index]) # return the latest link. 
- return self.chain[-proposal_index].parameters + return self.chain[proposal_index].parameters def get_acceptance( self, proposal_link, previous_link, proposal_link_below, previous_link_below From f49881b416c48bd2ef122cc24cf1a893c66fbef9 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Thu, 20 Mar 2025 17:26:19 +0100 Subject: [PATCH 09/21] enable parallel sampling with randomized subchain lengths --- tinyDA/ray.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tinyDA/ray.py b/tinyDA/ray.py index 8d0bf96..b55cbd9 100644 --- a/tinyDA/ray.py +++ b/tinyDA/ray.py @@ -149,6 +149,7 @@ def __init__( posteriors, proposal, subchain_lengths=None, + randomize_subchain_length=False, n_chains=2, initial_parameters=None, adaptive_error_model=None, @@ -171,6 +172,8 @@ def __init__( # whether to store the coarse chain. self.store_coarse_chain = store_coarse_chain + self.randomize_subchain_length = randomize_subchain_length + # initialise Ray. ray.init(ignore_reinit_error=True) @@ -180,6 +183,7 @@ def __init__( self.posteriors, self.proposal[i], self.subchain_lengths, + self.randomize_subchain_length, self.initial_parameters[i], self.adaptive_error_model, self.store_coarse_chain, From 12039d33dceefb372d1fcf35812d7b907e96d006 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Mon, 24 Mar 2025 17:34:15 +0100 Subject: [PATCH 10/21] minor changes --- tinyDA/proposal.py | 10 +++++----- tinyDA/sampler.py | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index efb31cb..33dffe8 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1535,7 +1535,7 @@ def make_mlda_proposal(self, subchain_length): Index of the sample to be promoted in this subchain. This only differs from subchain_length if randomize_subchain_length is true. """ - proposal_index = self._get_proposal_index() + proposal_index = self._get_proposal_index(subchain_length) # iterate through the subsamples. 
for i in range(subchain_length): # create a proposal from the next-lower level, @@ -1611,7 +1611,7 @@ def make_mlda_proposal(self, subchain_length): def make_base_proposal(self, subchain_length): # iterate through the subsamples. - proposal_index = self._get_proposal_index() + proposal_index = self._get_proposal_index(subchain_length) for i in range(subchain_length): # draw a new proposal, given the previous parameters. @@ -1655,11 +1655,11 @@ def get_acceptance( - proposal_link_below.posterior ) - def _get_random_proposal_index(self): - random_proposal_index = np.random.randint(-self.subchain_length, 0) + def _get_random_proposal_index(self, subchain_length): + random_proposal_index = np.random.randint(-subchain_length, 0) return random_proposal_index - def _get_fixed_proposal_index(self): + def _get_fixed_proposal_index(self, subchain_length): return -1 diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py index 74f7b35..c079a18 100644 --- a/tinyDA/sampler.py +++ b/tinyDA/sampler.py @@ -79,6 +79,9 @@ def sample( the same subchain length will be used for all levels. If running single-level MCMC, this parameter is ignored. Default is 1, resulting in "classic" DA MCMC for a two-level model. + randomize_subchain_length: bool, optional + Randomizes the subchain length, as described in Lykkegaard et al. + (2023). Default is false. adaptive_error_model : str or None, optional The adaptive error model, see e.g. Cui et al. (2019). If running single-level MCMC, this parameter is ignored. 
Default is None @@ -273,6 +276,7 @@ def sample( n_chains, initial_parameters, subchain_lengths, + randomize_subchain_length, adaptive_error_model, store_coarse_chain, ) @@ -285,6 +289,7 @@ def sample( n_chains, initial_parameters, subchain_lengths, + randomize_subchain_length, adaptive_error_model, store_coarse_chain, force_progress_bar, @@ -450,6 +455,7 @@ def _sample_sequential_mlda( n_chains, initial_parameters, subchain_lengths, + randomize_subchain_length, adaptive_error_model, store_coarse_chain, ): @@ -466,6 +472,7 @@ def _sample_sequential_mlda( posteriors, proposal[i], subchain_lengths, + randomize_subchain_length, initial_parameters[i], adaptive_error_model, store_coarse_chain, @@ -474,7 +481,7 @@ def _sample_sequential_mlda( chains[i].sample(iterations) result = _get_result_mlda( - chains, levels, iterations, subchain_lengths, store_coarse_chain + chains, levels, iterations, subchain_lengths, randomize_subchain_length, store_coarse_chain, ) return result @@ -487,6 +494,7 @@ def _sample_parallel_mlda( n_chains, initial_parameters, subchain_lengths, + randomize_subchain_length, adaptive_error_model, store_coarse_chain, force_progress_bar, @@ -502,6 +510,7 @@ def _sample_parallel_mlda( posteriors, proposal, subchain_lengths, + randomize_subchain_length, n_chains, initial_parameters, adaptive_error_model, @@ -511,7 +520,7 @@ def _sample_parallel_mlda( chains = parallel_chain.chains result = _get_result_mlda( - chains, levels, iterations, subchain_lengths, store_coarse_chain + chains, levels, iterations, subchain_lengths, randomize_subchain_length, store_coarse_chain, ) return result @@ -522,7 +531,9 @@ def _get_result_mlda( levels, iterations, subchain_lengths, + randomize_subchain_length, store_coarse_chain, + ): info = { @@ -531,6 +542,7 @@ def _get_result_mlda( "iterations": iterations + 1, "levels": levels, "subchain_lengths": subchain_lengths, + "randomize_subchain_length":randomize_subchain_length, } # collect and return the samples. 
From 21f7537e0ff9ba38db881c5b2d77f5e162fcfbdb Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:50:39 +0100 Subject: [PATCH 11/21] fix order in MLDA chain --- tinyDA/chain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tinyDA/chain.py b/tinyDA/chain.py index c5208e7..3347f6d 100644 --- a/tinyDA/chain.py +++ b/tinyDA/chain.py @@ -576,10 +576,10 @@ def __init__( posteriors, proposal, subchain_lengths, + randomize_subchain_length=False, initial_parameters=None, adaptive_error_model=None, store_coarse_chain=True, - randomize_subchain_length=False, ): """ Parameters From f611b38431c2a508353514ce300105d01a2c3768 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:51:27 +0100 Subject: [PATCH 12/21] formatting --- tinyDA/diagnostics.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tinyDA/diagnostics.py b/tinyDA/diagnostics.py index 06cfd51..964fe31 100644 --- a/tinyDA/diagnostics.py +++ b/tinyDA/diagnostics.py @@ -20,7 +20,7 @@ def to_inference_data(chain, level="fine", burnin=0, parameter_names=None): burnin : int, optional The burnin length. The default is 0. parameter_names : list, optional - List of the names of the parameters in the chain, in the same order + List of the names of the parameters in the chain, in the same order as they appear in each link. Default is None, meaning that parameters will be named [x1, x2, ...]. @@ -149,8 +149,10 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0): "attribute": attribute, } - if attribute == 'stats': - getattribute = lambda link, attribute: np.array([link.prior, link.likelihood, link.posterior]) + if attribute == "stats": + getattribute = lambda link, attribute: np.array( + [link.prior, link.likelihood, link.posterior] + ) else: getattribute = lambda link, attribute: getattr(link, attribute) @@ -159,7 +161,10 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0): # extract link attribute. 
for i in range(chain["n_chains"]): samples["chain_{}".format(i)] = np.array( - [getattribute(link, attribute) for link in chain["chain_{}".format(i)][burnin:]] + [ + getattribute(link, attribute) + for link in chain["chain_{}".format(i)][burnin:] + ] ) # if the input is a Delayed Acceptance chain. @@ -177,7 +182,6 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0): ] ) - # if the input is a Delayed Acceptance chain. elif chain["sampler"] == "MLDA": # copy the subchain length across. From d080e8ebdccc939355de6aa99692c947c11ee48b Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:51:48 +0100 Subject: [PATCH 13/21] formatting --- tinyDA/distributions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tinyDA/distributions.py b/tinyDA/distributions.py index 6518f78..bdbda2b 100644 --- a/tinyDA/distributions.py +++ b/tinyDA/distributions.py @@ -6,7 +6,6 @@ class JointPrior: - """JointPrior is a wrapper for a list of priors, if the parameters have different types of priors. The order must match the order of parameters for the model, since parameters are unnamed. @@ -107,7 +106,6 @@ def CompositePrior(*args, **kwargs): class PoissonPointProcess: - """PoissonPointProcess is a geometric prior, where the number of points has a Poisson distribution and their locations are uniformly distributed on the domain. Additional geometric attributes of the points can also be From de142e5ea600fe906ecd2e8369c901d8bcddba6f Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:52:01 +0100 Subject: [PATCH 14/21] formatting --- tinyDA/link.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tinyDA/link.py b/tinyDA/link.py index 152647f..37ca188 100644 --- a/tinyDA/link.py +++ b/tinyDA/link.py @@ -1,5 +1,4 @@ class Link: - """The Link class holds all relevant information about an MCMC sample, i.e. 
parameters, prior log-desnity, model output, log-likelihood and possibly a Quantity of Interest (QoI) @@ -21,7 +20,6 @@ class Link: """ def __init__(self, parameters, prior, model_output, likelihood, qoi=None): - """ Parameters ---------- From c0b39274572c2dab6cfb2ed5bb554786cb1012f2 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:52:19 +0100 Subject: [PATCH 15/21] formatting --- tinyDA/umbridge.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tinyDA/umbridge.py b/tinyDA/umbridge.py index 16e958c..7118264 100644 --- a/tinyDA/umbridge.py +++ b/tinyDA/umbridge.py @@ -1,7 +1,7 @@ import numpy as np -class UmBridgeModel: +class UmBridgeModel: """UmBridgeModel provides a wrapper for an UM-Bridge HTTPModel, which allows for using UM-Bridge forward operators directly in a tinyDA BlackBoxLinkFactory. @@ -22,7 +22,6 @@ class UmBridgeModel: """ def __init__(self, umbridge_model, pre=None, umbridge_config={}): - """ Parameters ---------- @@ -54,7 +53,6 @@ def __init__(self, umbridge_model, pre=None, umbridge_config={}): self.umbridge_config = umbridge_config def __call__(self, parameters): - """ Parameters ---------- @@ -86,7 +84,9 @@ def _gradient(self, parameters, sensitivity): umbridge_sens = sensitivity.tolist() # send converted model input the the UM-Bridge model. - umbridge_gradient = self.umbridge_model.gradient(0, 0, umbridge_input, umbridge_sens, self.umbridge_config) + umbridge_gradient = self.umbridge_model.gradient( + 0, 0, umbridge_input, umbridge_sens, self.umbridge_config + ) # convert the UM-Bridge output back to a NumPy array. 
gradient = np.array(umbridge_gradient).flatten() From 30ee33810257c82e9d0bd7d8188a3444e585bae3 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Tue, 25 Mar 2025 15:52:38 +0100 Subject: [PATCH 16/21] formatting --- tinyDA/utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tinyDA/utils.py b/tinyDA/utils.py index ca7cbcc..8bf31e9 100644 --- a/tinyDA/utils.py +++ b/tinyDA/utils.py @@ -7,7 +7,6 @@ class RecursiveSampleMoments: - """Iteratively constructs a sample mean and covariance, given input samples. Used to capture an estimate of the mean and covariance of the bias of an MLDA coarse model, and for the Adaptive Metropolis (AM) proposal. @@ -125,7 +124,6 @@ def update(self, x): class ZeroMeanRecursiveSampleMoments(RecursiveSampleMoments): - """Iteratively constructs a sample covariance, with zero mean given input samples. It is a specialised version of RecursiveSampleMoments, used only in the state dependent error model. From 650f18608a4e041b9835e744dd6747cbd613cade Mon Sep 17 00:00:00 2001 From: louisekluge Date: Wed, 26 Mar 2025 19:55:02 +0100 Subject: [PATCH 17/21] sampler now also returns promoted samples for all but the top level --- tinyDA/sampler.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py index c079a18..0c30dea 100644 --- a/tinyDA/sampler.py +++ b/tinyDA/sampler.py @@ -559,11 +559,20 @@ def _get_result_mlda( "chain_l{}_{}".format(i, j): list(compress(chain.chain, chain.is_local)) for j, chain in enumerate(_current) } + promoted_current = { + "promoted_l{}_{}".format(i, j): list(compress(chain.promoted, chain.is_local)) + for j, chain in enumerate(_current) + } else: chains_current = { "chain_l{}_{}".format(i, j): None for j, chain in enumerate(_current) } - chains_all = {**chains_all, **chains_current} + promoted_current = { + "promoted_l{}_{}".format(i, j): None for j, chain in enumerate(_current) + } + chains_all = {**chains_all, **chains_current, **promoted_current} 
_current = [chain.proposal for chain in _current] + + return {**info, **chains_all} From 6b6da3bd13ea1c81808cd17d1dd386e6b9b427dc Mon Sep 17 00:00:00 2001 From: louisekluge Date: Mon, 7 Apr 2025 13:55:16 +0200 Subject: [PATCH 18/21] randomized subchain lengths, working --- tinyDA/proposal.py | 1 + tinyDA/ray.py | 6 ++++-- tinyDA/sampler.py | 5 +++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py index 33dffe8..9da6bc9 100644 --- a/tinyDA/proposal.py +++ b/tinyDA/proposal.py @@ -1386,6 +1386,7 @@ def __init__( self.chain.append(self.posterior.create_link(self.initial_parameters)) self.accepted.append(True) self.is_local.append(False) + self.promoted.append(self.chain[-1]) # set the adaptive error model as an attribute. self.adaptive_error_model = adaptive_error_model diff --git a/tinyDA/ray.py b/tinyDA/ray.py index b55cbd9..8d7eeee 100644 --- a/tinyDA/ray.py +++ b/tinyDA/ray.py @@ -35,7 +35,7 @@ class ParallelChain: Runs the MCMC for the specified number of iterations. """ - def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None): + def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None, randomize_subchain_length=False): """ Parameters ---------- @@ -61,13 +61,15 @@ def __init__(self, posterior, proposal, n_chains=2, initial_parameters=None): # set the initial parameters. self.initial_parameters = initial_parameters + self.randomize_suchain_length = randomize_subchain_length + # initialise Ray. ray.init(ignore_reinit_error=True) # set up the parallel chains as Ray actors. 
self.remote_chains = [ RemoteChain.remote( - self.posterior, self.proposal[i], self.initial_parameters[i] + self.posterior, self.proposal[i], self.initial_parameters[i], self.randomize_suchain_length ) for i in range(self.n_chains) ] diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py index 0c30dea..c53f0a7 100644 --- a/tinyDA/sampler.py +++ b/tinyDA/sampler.py @@ -322,13 +322,14 @@ def _sample_parallel( n_chains, initial_parameters, force_progress_bar, + randomize_subchain_length, ): """Helper function for tinyDA.sample()""" print("Sampling {} chains in parallel".format(n_chains)) # create a parallel sampling instance and sample. - chains = ParallelChain(posteriors[0], proposal, n_chains, initial_parameters) + chains = ParallelChain(posteriors[0], proposal, n_chains, initial_parameters, randomize_subchain_length) chains.sample(iterations, force_progress_bar) info = {"sampler": "MH", "n_chains": n_chains, "iterations": iterations + 1} @@ -560,7 +561,7 @@ def _get_result_mlda( for j, chain in enumerate(_current) } promoted_current = { - "promoted_l{}_{}".format(i, j): list(compress(chain.promoted, chain.is_local)) + "promoted_l{}_{}".format(i, j): chain.promoted for j, chain in enumerate(_current) } else: From 04b1de45a00e5258fd2721da83331b23d9190771 Mon Sep 17 00:00:00 2001 From: louisekluge Date: Mon, 7 Apr 2025 13:55:36 +0200 Subject: [PATCH 19/21] sketch for MLDA_estimator --- tinyDA/diagnostics.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tinyDA/diagnostics.py b/tinyDA/diagnostics.py index 964fe31..3636a7e 100644 --- a/tinyDA/diagnostics.py +++ b/tinyDA/diagnostics.py @@ -211,3 +211,29 @@ def get_samples(chain, attribute="parameters", level="fine", burnin=0): # return the samples. return samples + + +def MLDA_estimators(chain, attribute="qoi", variable="x0", burnin=0): + """Computes the unbiased Monte-Carlo estimator for Multilevel Delayed Acceptance + chains, as derived in Lykkegaard et al. 2023. 
+ 
+
+    Parameters
+    ----------
+    chain : dict
+        A dict as returned by tinyDA.sample, containing chain information
+        and lists of tinyDA.Link instances.
+    attribute : str, optional
+        Which link attribute ('parameters', 'model_output', 'qoi' or 'stats')
+        to extract. The default is 'qoi'.
+    variable : str, optional
+        Which variable of the posterior or qoi to marginalize over.
+    burnin : int, optional
+        The burnin length. The default is 0.
+    Returns
+    ----------
+    float
+        Output of the estimator computation.
+    """
+
+    estimator = 0
+    return estimator
\ No newline at end of file
From 881849e8c0282243c570e16bde9c466851bb893c Mon Sep 17 00:00:00 2001
From: louisekluge
Date: Tue, 8 Apr 2025 09:49:01 +0200
Subject: [PATCH 20/21] choosing self.proposal.promoted[-1] instead of
 self.proposal.chain[-1] for AEM

---
 tinyDA/proposal.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tinyDA/proposal.py b/tinyDA/proposal.py
index 9da6bc9..850ca68 100644
--- a/tinyDA/proposal.py
+++ b/tinyDA/proposal.py
@@ -1435,7 +1435,7 @@ def __init__(
         if self.adaptive_error_model is not None:
             # compute the difference between coarse and fine level.
             self.model_diff = (
-                self.chain[-1].model_output - self.proposal.chain[-1].model_output
+                self.chain[-1].model_output - self.proposal.promoted[-1].model_output
             )
 
             # set up the state-independent adaptive error model.
@@ -1522,7 +1522,7 @@ def align_chain(self, parameters, accepted):
 
     def _reset_chain(self):
         # remove everything except the latest coarse link, if the coarse
        # chain shouldn't be stored.
- self.chain = [self.chain[-1]] + self.chain = [self.chain[-self.proposal_index]] if self.level > 0: self.proposal._reset_chain() From 8099094f19b0b5e7e05ccbd57214bd8540c1985a Mon Sep 17 00:00:00 2001 From: louisekluge Date: Fri, 16 May 2025 13:33:48 +0200 Subject: [PATCH 21/21] correction --- tinyDA/sampler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tinyDA/sampler.py b/tinyDA/sampler.py index c53f0a7..12c4815 100644 --- a/tinyDA/sampler.py +++ b/tinyDA/sampler.py @@ -543,7 +543,7 @@ def _get_result_mlda( "iterations": iterations + 1, "levels": levels, "subchain_lengths": subchain_lengths, - "randomize_subchain_length":randomize_subchain_length, + "randomize_subchain_length": randomize_subchain_length, } # collect and return the samples.