From 95b019e924279b14b3a01d93d64fa18dd632a822 Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 12 Feb 2026 16:44:32 -0600 Subject: [PATCH 1/7] some fixes (?), experimenting with calling only suggest on LibensembleGenInterfacer's, plus refactoring the aposmm calling script into how I'd like it to look eventually. --- libensemble/history.py | 2 +- .../test_asktell_aposmm_nlopt.py | 31 ++++++++++++------- libensemble/utils/runners.py | 4 +-- pixi.lock | 2 +- 4 files changed, 23 insertions(+), 16 deletions(-) diff --git a/libensemble/history.py b/libensemble/history.py index 3d3b5bc6fd..0d036b0415 100644 --- a/libensemble/history.py +++ b/libensemble/history.py @@ -107,7 +107,7 @@ def __init__( self.last_ended = -1 def _append_new_fields(self, H_f: npt.NDArray) -> None: - dtype_new = np.dtype(list(set(self.H.dtype.descr + H_f.dtype.descr))) + dtype_new = np.dtype(list(set(self.H.dtype.descr + np.lib.recfunctions.repack_fields(H_f).dtype.descr))) H_new = np.zeros(len(self.H), dtype=dtype_new) old_fields = self.H.dtype.names for field in old_fields: diff --git a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py index 83e3bf6253..765d44c703 100644 --- a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py +++ b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py @@ -15,7 +15,6 @@ # TESTSUITE_COMMS: local mpi tcp # TESTSUITE_NPROCS: 3 -import sys from math import gamma, pi, sqrt import numpy as np @@ -26,7 +25,6 @@ from libensemble.sim_funcs.executor_hworld import executor_hworld as sim_f_exec # Import libEnsemble items for this test -from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f libensemble.gen_funcs.rc.aposmm_optimizers = "nlopt" from time import time @@ -34,11 +32,25 @@ from gest_api.vocs import VOCS from libensemble import Ensemble -from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f +from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f from libensemble.gen_classes import APOSMM from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, SimSpecs from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima + +def six_hump_camel_func(x): + """ + Definition of the six-hump camel + """ + x1 = x["core"] + x2 = x["edge"] + term1 = (4 - 2.1 * x1**2 + (x1**4) / 3) * x1**2 + term2 = x1 * x2 + term3 = (-4 + 4 * x2**2) * x2**2 + + return {"energy": term1 + term2 + term3} + + # Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). if __name__ == "__main__": @@ -49,19 +61,16 @@ if workflow.is_manager: start_time = time() - if workflow.nworkers < 2: - sys.exit("Cannot run with a persistent worker if only one worker -- aborting...") - n = 2 workflow.alloc_specs = AllocSpecs(alloc_f=alloc_f) + workflow.libE_specs.gen_on_manager = True + vocs = VOCS( variables={"core": [-3, 3], "edge": [-2, 2], "core_on_cube": [-3, 3], "edge_on_cube": [-2, 2]}, objectives={"energy": "MINIMIZE"}, ) - workflow.libE_specs.gen_on_manager = True - aposmm = APOSMM( vocs, max_active_runs=workflow.nworkers, # should this match nworkers always? practically? 
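Note on the history.py hunk above: when a worker hands back a multi-field view of a structured array, `H_f.dtype.descr` includes `('', '|Vn')` padding entries for the gaps between fields, and those void entries would leak into the combined dtype built here; `np.lib.recfunctions.repack_fields` makes a packed copy whose `descr` lists only real fields, which is presumably why it was swapped in. A minimal standalone sketch of the difference (illustrative field names, not part of the patch):

import numpy as np
from numpy.lib import recfunctions as rfn

full = np.zeros(3, dtype=[("x", float), ("f", float), ("sim_id", int)])
view = full[["x", "sim_id"]]  # multi-field view keeps the original offsets (gap where "f" was)
print(view.dtype.descr)       # [('x', '<f8'), ('', '|V8'), ('sim_id', '<i8')] -- note the padding entry
print(rfn.repack_fields(view).dtype.descr)  # [('x', '<f8'), ('sim_id', '<i8')] -- safe to merge into a new dtype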
@@ -74,17 +83,15 @@ ftol_abs=1e-6, ) - # SH TODO - dont want this stuff duplicated - pass with vocs instead workflow.gen_specs = GenSpecs( - persis_in=["x", "x_on_cube", "sim_id", "local_min", "local_pt", "f"], generator=aposmm, + vocs=vocs, batch_size=5, initial_batch_size=10, - user={"initial_sample_size": 100}, ) if run == 0: - workflow.sim_specs = SimSpecs(sim_f=sim_f, inputs=["x"], outputs=[("f", float)]) + workflow.sim_specs = SimSpecs(simulator=six_hump_camel_func, vocs=vocs) workflow.exit_criteria = ExitCriteria(sim_max=2000) elif run == 1: workflow.persis_info["num_gens_started"] = 0 diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index b0c78a7bc6..5fa6c9017a 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -191,12 +191,12 @@ def _start_generator_loop(self, tag, Work, H_in) -> npt.NDArray: class LibensembleGenThreadRunner(StandardGenRunner): def _get_initial_suggest(self, libE_info) -> npt.NDArray: """Get initial batch from generator based on generator type""" - return self.gen.suggest_numpy() # libE really needs to receive the *entire* initial batch from a threaded gen + return self.gen.suggest() # libE really needs to receive the *entire* initial batch from a threaded gen def _suggest_and_send(self): """Loop over generator's outbox contents, send to manager""" while not self.gen._running_gen_f.outbox.empty(): # recv/send any outstanding messages - points = self.gen.suggest_numpy() + points = self.gen.suggest() if callable(getattr(self.gen, "suggest_updates", None)): updates = self.gen.suggest_updates() else: diff --git a/pixi.lock b/pixi.lock index b0c5eb203a..07f3d567dd 100644 --- a/pixi.lock +++ b/pixi.lock @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c10357cee7d3813245c607ea39fde22da46119409451949c0780225515aa5afa +oid sha256:92f8b98ae9e038ca9165e0c306116ca5144988ac1451e720c4d5c2d1af14cbd9 size 1216383 From 35fa55ecb02b52bcf492c976fed22ea29501ff63 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 13 Feb 2026 16:07:09 -0600 Subject: [PATCH 2/7] various fixes, adjustments, new functions. unmap aposmm's output numpy array into the H constructed from VOCS, then do the reverse for the numpy array from H back into APOSMM. Also ensure that _id data becomes sim_id, and vice-versa --- libensemble/gen_classes/aposmm.py | 14 +++++- libensemble/utils/misc.py | 77 ++++++++++++++++++++++++++++--- libensemble/utils/runners.py | 17 +++++-- pixi.lock | 2 +- 4 files changed, 96 insertions(+), 14 deletions(-) diff --git a/libensemble/gen_classes/aposmm.py b/libensemble/gen_classes/aposmm.py index 5c92d544d1..71c2fcb437 100644 --- a/libensemble/gen_classes/aposmm.py +++ b/libensemble/gen_classes/aposmm.py @@ -293,7 +293,13 @@ def __init__( def _slot_in_data(self, results): """Slot in libE_calc_in and trial data into corresponding array fields. 
*Initial sample only!!*""" - self._ingest_buf[self._n_buffd_results : self._n_buffd_results + len(results)] = results + for name in results.dtype.names: + if name == "_id": + self._ingest_buf["sim_id"][self._n_buffd_results : self._n_buffd_results + len(results)] = results[ + "_id" + ] + else: + self._ingest_buf[name][self._n_buffd_results : self._n_buffd_results + len(results)] = results[name] def _enough_initial_sample(self): return ( @@ -361,7 +367,11 @@ def ingest_numpy(self, results: npt.NDArray, tag: int = EVAL_GEN_TAG) -> None: # Initial sample buffering here: if self._n_buffd_results == 0: - self._ingest_buf = np.zeros(self.gen_specs["user"]["initial_sample_size"], dtype=results.dtype) + # Create a dtype that includes sim_id but excludes _id + descr = [d for d in results.dtype.descr if d[0] != "_id"] + if "sim_id" not in [d[0] for d in descr]: + descr.append(("sim_id", int)) + self._ingest_buf = np.zeros(self.gen_specs["user"]["initial_sample_size"], dtype=descr) if not self._enough_initial_sample(): self._slot_in_data(np.copy(results)) diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index 19b67f37e3..a6a432f81a 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -74,9 +74,8 @@ def _get_new_dtype_fields(first: dict, mapping: dict = {}) -> list: fields_to_convert = list( # combining all mapping lists chain.from_iterable(list(mapping.values())) ) # fields like ["beam_length", "beam_width"] that will become "x" - new_dtype_names = [i for i in new_dtype_names if i not in fields_to_convert] + list( - mapping.keys() - ) # array dtype needs "x". avoid fields from mapping values since we're converting those to "x" + new_dtype_names = [i for i in new_dtype_names if i not in fields_to_convert] + # array dtype needs "x". avoid fields from mapping values since we're converting those to "x" # We need to accommodate "_id" getting mapped to "sim_id", but if it's not present # in the input dictionary, then perhaps we're doing an initial sample. if "_id" not in first and "sim_id" in mapping: @@ -139,9 +138,7 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - new_dtype_names = _get_new_dtype_fields(first, mapping) combinable_names = _get_combinable_multidim_names(first, new_dtype_names) # [['x0', 'x1'], ['z']] - if ( - dtype is None - ): # Default value gets set upon function instantiation (default is mutable). + if dtype is None: # Default value gets set upon function instantiation (default is mutable). dtype = [] # build dtype of non-mapped fields. appending onto empty dtype @@ -219,6 +216,74 @@ def unmap_numpy_array(array: npt.NDArray, mapping: dict = {}) -> npt.NDArray: return unmapped_array +def map_numpy_array(array: npt.NDArray, mapping: dict = {}) -> npt.NDArray: + """Convert numpy array with individual scalar fields to mapped fields. 
+ Parameters + ---------- + array : npt.NDArray + Input array with unmapped fields like x0, x1, x2 + mapping : dict + Mapping from field names to variable names + Returns + ------- + npt.NDArray + Array with mapped fields like x = [x0, x1, x2] + """ + if not mapping or array is None: + return array + + # Create new dtype with mapped fields + new_fields = [] + + # Track fields processed by mapping to avoid duplication + mapped_source_fields = set() + for key, val_list in mapping.items(): + mapped_source_fields.update(val_list) + + # First add mapped fields from the mapping definition + for mapped_name, val_list in mapping.items(): + if not val_list: + continue + first_var = val_list[0] + # We assume all components have the same type, take from first + if first_var in array.dtype.names: + base_type = array.dtype[first_var] + size = len(val_list) + if size > 1: + new_fields.append((mapped_name, base_type, (size,))) + else: + new_fields.append((mapped_name, base_type)) + + # Then add any fields from the source array that were NOT part of a mapping + for field in array.dtype.names: + if field not in mapped_source_fields: + new_fields.append((field, array.dtype[field])) + + # remove duplicates from new_fields + new_fields = list(dict.fromkeys(new_fields)) + + # Create the new array + mapped_array = np.zeros(len(array), dtype=new_fields) + + # Fill the new array + for field in mapped_array.dtype.names: + if field in mapping: + # Mapped field: stack the source columns + val_list = mapping[field] + if len(val_list) == 1: + mapped_array[field] = array[val_list[0]] + else: + # Stack columns horizontally for each row + # We need to extract each column, then stack them along axis 1 + cols = [array[val] for val in val_list] + mapped_array[field] = np.stack(cols, axis=1) + else: + # Direct copy + mapped_array[field] = array[field] + + return mapped_array + + def np_to_list_dicts(array: npt.NDArray, mapping: dict = {}) -> List[dict]: """Convert numpy structured array to list of dicts""" if array is None: diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index 8e940cfa4b..d993866ee5 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -9,7 +9,7 @@ from libensemble.generators import LibensembleGenerator, PersistentGenInterfacer from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG from libensemble.tools.persistent_support import PersistentSupport -from libensemble.utils.misc import list_dicts_to_np, np_to_list_dicts +from libensemble.utils.misc import list_dicts_to_np, map_numpy_array, np_to_list_dicts, unmap_numpy_array logger = logging.getLogger(__name__) @@ -197,12 +197,17 @@ def _convert_initial_ingest(self, x: npt.NDArray) -> list: class LibensembleGenThreadRunner(StandardGenRunner): def _get_initial_suggest(self, libE_info) -> npt.NDArray: """Get initial batch from generator based on generator type""" - return self.gen.suggest() # libE really needs to receive the *entire* initial batch from a threaded gen + return unmap_numpy_array( + self.gen.suggest_numpy(), mapping=getattr(self.gen, "variables_mapping", {}) + ) # libE really needs to receive the *entire* initial batch from a threaded gen + + def _convert_initial_ingest(self, x: npt.NDArray) -> list: + self.gen.ingest_numpy(map_numpy_array(x, mapping=getattr(self.gen, "variables_mapping", {}))) def _suggest_and_send(self): """Loop over generator's outbox contents, send to manager""" while not self.gen._running_gen_f.outbox.empty(): # recv/send any 
outstanding messages - points = self.gen.suggest() + points = unmap_numpy_array(self.gen.suggest_numpy(), mapping=getattr(self.gen, "variables_mapping", {})) if callable(getattr(self.gen, "suggest_updates", None)): updates = self.gen.suggest_updates() else: @@ -222,6 +227,8 @@ def _loop_over_gen(self, *args): while self.ps.comm.mail_flag(): # receive any new messages from Manager, give all to gen tag, _, H_in = self.ps.recv() if tag in [STOP_TAG, PERSIS_STOP]: - self.gen.ingest_numpy(H_in, PERSIS_STOP) + self.gen.ingest_numpy( + map_numpy_array(H_in, mapping=getattr(self.gen, "variables_mapping", {})), PERSIS_STOP + ) return self.gen._running_gen_f.result() - self.gen.ingest_numpy(H_in) + self.gen.ingest_numpy(map_numpy_array(H_in, mapping=getattr(self.gen, "variables_mapping", {}))) diff --git a/pixi.lock b/pixi.lock index 07f3d567dd..44d37e46ef 100644 --- a/pixi.lock +++ b/pixi.lock @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92f8b98ae9e038ca9165e0c306116ca5144988ac1451e720c4d5c2d1af14cbd9 +oid sha256:432c0bdf87ed393d5f46ace44e8c0e3c37580ab096fbbc72398cd1e98559e0e1 size 1216383 From dcf2ed9ca0b8c7a5050ce3309f489cb7ffa0e08a Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 18 Feb 2026 15:46:28 -0600 Subject: [PATCH 3/7] history.py: - Hint towards easy bug where user may have intended to set sim_specs.simulator instead of sim_specs.sim_f the regression test: - use vocs for second test iteration misc.py - more reliably build the dtype out of mapping keys that are present in the input data runners.py - some cleanup --- libensemble/history.py | 8 +++++++- .../regression_tests/test_asktell_aposmm_nlopt.py | 5 +---- libensemble/utils/misc.py | 10 ++++------ libensemble/utils/runners.py | 6 ++---- pixi.lock | 4 ++-- 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/libensemble/history.py b/libensemble/history.py index 3f35779cb4..045569edb6 100644 --- a/libensemble/history.py +++ b/libensemble/history.py @@ -123,7 +123,13 @@ def update_history_f(self, D: dict, kill_canceled_sims: bool = False) -> None: new_inds = D["libE_info"]["H_rows"] # The list of rows (as a numpy array) returned_H = D["calc_out"] - fields = returned_H.dtype.names if returned_H is not None else [] + try: + fields = returned_H.dtype.names if returned_H is not None else [] + except AttributeError: + raise AttributeError( + "Manager received an unexpected datatype from a simulation." + + "Perhaps you meant to set `SimSpecs.simulator` instead of `SimSpecs.sim_f`?" 
+ ) if returned_H is not None and any([field not in self.H.dtype.names for field in returned_H.dtype.names]): self._append_new_fields(returned_H) diff --git a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py index 765d44c703..42cd8bf4eb 100644 --- a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py +++ b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py @@ -22,7 +22,6 @@ import libensemble.gen_funcs from libensemble.executors.mpi_executor import MPIExecutor from libensemble.sim_funcs import six_hump_camel -from libensemble.sim_funcs.executor_hworld import executor_hworld as sim_f_exec # Import libEnsemble items for this test @@ -98,9 +97,7 @@ def six_hump_camel_func(x): sim_app2 = six_hump_camel.__file__ exctr = MPIExecutor() exctr.register_app(full_path=sim_app2, app_name="six_hump_camel", calc_type="sim") # Named app - workflow.sim_specs = SimSpecs( - sim_f=sim_f_exec, inputs=["x"], outputs=[("f", float), ("cstat", int)], user={"cores": 1} - ) + workflow.sim_specs = SimSpecs(simulator=six_hump_camel_func, vocs=vocs) workflow.exit_criteria = ExitCriteria(sim_max=200) workflow.add_random_streams() diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index a6a432f81a..12cd85a86c 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -75,11 +75,6 @@ def _get_new_dtype_fields(first: dict, mapping: dict = {}) -> list: chain.from_iterable(list(mapping.values())) ) # fields like ["beam_length", "beam_width"] that will become "x" new_dtype_names = [i for i in new_dtype_names if i not in fields_to_convert] - # array dtype needs "x". avoid fields from mapping values since we're converting those to "x" - # We need to accommodate "_id" getting mapped to "sim_id", but if it's not present - # in the input dictionary, then perhaps we're doing an initial sample. - if "_id" not in first and "sim_id" in mapping: - new_dtype_names.remove("sim_id") return new_dtype_names @@ -149,9 +144,12 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - if len(mapping): existing_names = [f[0] for f in dtype] for name in mapping: - if name not in existing_names: + # If the field is already in the dtype, skip it. 
*And* the field is present in the input data + if name not in existing_names and all(src in first for src in mapping[name]): size = len(mapping[name]) dtype.append(_decide_dtype(name, 0.0, size)) # default to float + new_dtype_names.append(name) + combinable_names.append(mapping[name]) out = np.zeros(len(list_dicts), dtype=dtype) diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index d993866ee5..abff32ac35 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -195,11 +195,9 @@ def _convert_initial_ingest(self, x: npt.NDArray) -> list: class LibensembleGenThreadRunner(StandardGenRunner): - def _get_initial_suggest(self, libE_info) -> npt.NDArray: + def _get_initial_suggest(self, _) -> npt.NDArray: """Get initial batch from generator based on generator type""" - return unmap_numpy_array( - self.gen.suggest_numpy(), mapping=getattr(self.gen, "variables_mapping", {}) - ) # libE really needs to receive the *entire* initial batch from a threaded gen + return unmap_numpy_array(self.gen.suggest_numpy(), mapping=getattr(self.gen, "variables_mapping", {})) def _convert_initial_ingest(self, x: npt.NDArray) -> list: self.gen.ingest_numpy(map_numpy_array(x, mapping=getattr(self.gen, "variables_mapping", {}))) diff --git a/pixi.lock b/pixi.lock index 44d37e46ef..9445b272f8 100644 --- a/pixi.lock +++ b/pixi.lock @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:432c0bdf87ed393d5f46ace44e8c0e3c37580ab096fbbc72398cd1e98559e0e1 -size 1216383 +oid sha256:fffa4c0d8ca1c65988df4969fd392f632f6d6505d05cb3935b1fbbc4d23935a1 +size 1216617 From f47c6852b8faa25c70c91ad979d9c46281957e8a Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 19 Feb 2026 07:49:48 -0600 Subject: [PATCH 4/7] exclude optimas ax tests on macos --- libensemble/tests/regression_tests/test_optimas_ax_mf.py | 7 ++----- .../tests/regression_tests/test_optimas_ax_multitask.py | 4 ++-- libensemble/tests/regression_tests/test_optimas_ax_sf.py | 4 ++-- pixi.lock | 2 +- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/libensemble/tests/regression_tests/test_optimas_ax_mf.py b/libensemble/tests/regression_tests/test_optimas_ax_mf.py index b6f43b3edf..758aa1fc2c 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_mf.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_mf.py @@ -16,9 +16,9 @@ # TESTSUITE_COMMS: mpi local # TESTSUITE_NPROCS: 4 # TESTSUITE_EXTRA: true +# TESTSUITE_OS_SKIP: OSX import numpy as np - from gest_api.vocs import VOCS from optimas.generators import AxMultiFidelityGenerator @@ -32,10 +32,7 @@ def eval_func_mf(input_params): x0 = input_params["x0"] x1 = input_params["x1"] resolution = input_params["res"] - result = -( - (x0 + 10 * np.cos(x0 + 0.1 * resolution)) - * (x1 + 5 * np.cos(x1 - 0.2 * resolution)) - ) + result = -((x0 + 10 * np.cos(x0 + 0.1 * resolution)) * (x1 + 5 * np.cos(x1 - 0.2 * resolution))) return {"f": result} diff --git a/libensemble/tests/regression_tests/test_optimas_ax_multitask.py b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py index 04a2b5430f..9e97dcad70 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_multitask.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py @@ -23,10 +23,10 @@ # TESTSUITE_NPROCS: 4 # TESTSUITE_EXTRA: true # TESTSUITE_EXCLUDE: true +# TESTSUITE_OS_SKIP: OSX import numpy as np from gest_api.vocs import VOCS - from optimas.core import Task from optimas.generators import AxMultitaskGenerator @@ -37,7 +37,7 @@ def 
eval_func_multitask(input_params): """Evaluation function for task1 or task2 in multitask test""" - print(f'input_params: {input_params}') + print(f"input_params: {input_params}") x0 = input_params["x0"] x1 = input_params["x1"] trial_type = input_params["trial_type"] diff --git a/libensemble/tests/regression_tests/test_optimas_ax_sf.py b/libensemble/tests/regression_tests/test_optimas_ax_sf.py index ba0b66c297..e4ee9e8a79 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_sf.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_sf.py @@ -16,9 +16,9 @@ # TESTSUITE_COMMS: mpi local # TESTSUITE_NPROCS: 4 # TESTSUITE_EXTRA: true +# TESTSUITE_OS_SKIP: OSX import numpy as np - from gest_api.vocs import VOCS from optimas.generators import AxSingleFidelityGenerator @@ -28,7 +28,7 @@ def eval_func_sf(input_params): - """Evaluation function for single-fidelity test. """ + """Evaluation function for single-fidelity test.""" x0 = input_params["x0"] x1 = input_params["x1"] result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) diff --git a/pixi.lock b/pixi.lock index 9445b272f8..79394ef448 100644 --- a/pixi.lock +++ b/pixi.lock @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fffa4c0d8ca1c65988df4969fd392f632f6d6505d05cb3935b1fbbc4d23935a1 +oid sha256:567e81eabdaf24db518cc9f93fa60e37d95dc39bc6a9b69db9eed1e8ec193997 size 1216617 From 0e7924a69d5d022e4726f410f3913b7e2e313779 Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 19 Feb 2026 08:39:52 -0600 Subject: [PATCH 5/7] remove optimas-ax tests from newer python extended tests --- .github/workflows/extra.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/extra.yml b/.github/workflows/extra.yml index 99e1581ac7..de7fd86dcb 100644 --- a/.github/workflows/extra.yml +++ b/.github/workflows/extra.yml @@ -84,6 +84,8 @@ jobs: rm ./libensemble/tests/regression_tests/test_gpCAM.py # needs gpcam, which doesn't build on 3.13 rm ./libensemble/tests/regression_tests/test_asktell_gpCAM.py # needs gpcam, which doesn't build on 3.13 rm ./libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py # needs ax-platform, which doesn't yet support 3.14 + rm ./libensemble/tests/regression_tests/test_optimas_ax_mf.py # needs ax-platform, which doesn't yet support 3.14 + rm ./libensemble/tests/regression_tests/test_optimas_ax_sf.py # needs ax-platform, which doesn't yet support 3.14 - name: Start Redis if: matrix.os == 'ubuntu-latest' From 37bc5d85144b38f7f7a0debb1c2bba50f4d80345 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 20 Feb 2026 12:59:28 -0600 Subject: [PATCH 6/7] coverage fixes, plus adjustments for warning/handling when both the wrong simulator/sim_f parameter is set in sim_specs, and accordingly the wrong data is returned. 
test for this --- libensemble/history.py | 10 +---- libensemble/manager.py | 14 ++++++- .../test_asktell_aposmm_nlopt.py | 17 +++++++-- libensemble/utils/misc.py | 37 +++++++------------ libensemble/utils/runners.py | 15 +++++++- 5 files changed, 56 insertions(+), 37 deletions(-) diff --git a/libensemble/history.py b/libensemble/history.py index 045569edb6..aa16a4d771 100644 --- a/libensemble/history.py +++ b/libensemble/history.py @@ -121,16 +121,10 @@ def update_history_f(self, D: dict, kill_canceled_sims: bool = False) -> None: Updates the history after points have been evaluated """ - new_inds = D["libE_info"]["H_rows"] # The list of rows (as a numpy array) + new_inds = D["libE_info"]["H_rows"] returned_H = D["calc_out"] - try: - fields = returned_H.dtype.names if returned_H is not None else [] - except AttributeError: - raise AttributeError( - "Manager received an unexpected datatype from a simulation." - + "Perhaps you meant to set `SimSpecs.simulator` instead of `SimSpecs.sim_f`?" - ) + fields = returned_H.dtype.names if returned_H is not None else [] if returned_H is not None and any([field not in self.H.dtype.names for field in returned_H.dtype.names]): self._append_new_fields(returned_H) diff --git a/libensemble/manager.py b/libensemble/manager.py index 22ae8b5d3e..5c084d3211 100644 --- a/libensemble/manager.py +++ b/libensemble/manager.py @@ -484,7 +484,7 @@ def _update_state_on_worker_msg(self, persis_info: dict, D_recv: dict, w: int) - calc_status = D_recv["calc_status"] keep_state = D_recv["libE_info"].get("keep_state", False) - if w not in self.persis_pending and not self.W[w]["active_recv"] and not keep_state: + if (w not in self.persis_pending and not self.W[w]["active_recv"] and not keep_state) or self.WorkerExc: self.W[w]["active"] = 0 if calc_status in [FINISHED_PERSISTENT_SIM_TAG, FINISHED_PERSISTENT_GEN_TAG]: @@ -507,7 +507,17 @@ def _update_state_on_worker_msg(self, persis_info: dict, D_recv: dict, w: int) - self._freeup_resources(w) else: if calc_type == EVAL_SIM_TAG: - self.hist.update_history_f(D_recv, self.kill_canceled_sims) + try: + self.hist.update_history_f(D_recv, self.kill_canceled_sims) + except AttributeError as e: + if self.WorkerExc: + logger.debug(f"Manager ignoring secondary data error from worker {w} during shutdown: {e}") + else: + self.WorkerExc = True + self._kill_workers() + raise WorkerException( + f"Error in data from worker {w}", str(e), traceback.format_exc() + ) from None if calc_type == EVAL_GEN_TAG: D = D_recv["calc_out"] self._ensure_sim_id_in_persis_in(D) diff --git a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py index 42cd8bf4eb..4b03d7678b 100644 --- a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py +++ b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py @@ -33,6 +33,7 @@ from libensemble import Ensemble from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f from libensemble.gen_classes import APOSMM +from libensemble.manager import LoggedException from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, SimSpecs from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima @@ -53,7 +54,7 @@ def six_hump_camel_func(x): # Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
if __name__ == "__main__": - for run in range(2): + for run in range(3): workflow = Ensemble(parse_args=True) @@ -99,13 +100,23 @@ def six_hump_camel_func(x): exctr.register_app(full_path=sim_app2, app_name="six_hump_camel", calc_type="sim") # Named app workflow.sim_specs = SimSpecs(simulator=six_hump_camel_func, vocs=vocs) workflow.exit_criteria = ExitCriteria(sim_max=200) + elif run == 2: + workflow.persis_info["num_gens_started"] = 0 + workflow.sim_specs = SimSpecs( + sim_f=six_hump_camel_func, vocs=vocs + ) # wrong parameter, but check we get error message + workflow.exit_criteria = ExitCriteria(sim_max=200) workflow.add_random_streams() - H, _, _ = workflow.run() + try: + H, _, _ = workflow.run() + except Exception as e: + assert isinstance(e, LoggedException) + aposmm.finalize() + continue # Perform the run - if workflow.is_manager and run == 0: print("[Manager]:", H[np.where(H["local_min"])]["x"]) print("[Manager]: Time taken =", time() - start_time, flush=True) diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index 12cd85a86c..83f2388800 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -240,17 +240,14 @@ def map_numpy_array(array: npt.NDArray, mapping: dict = {}) -> npt.NDArray: # First add mapped fields from the mapping definition for mapped_name, val_list in mapping.items(): - if not val_list: - continue first_var = val_list[0] # We assume all components have the same type, take from first - if first_var in array.dtype.names: - base_type = array.dtype[first_var] - size = len(val_list) - if size > 1: - new_fields.append((mapped_name, base_type, (size,))) - else: - new_fields.append((mapped_name, base_type)) + base_type = array.dtype[first_var] + size = len(val_list) + if size > 1: + new_fields.append((mapped_name, base_type, (size,))) + else: + new_fields.append((mapped_name, base_type)) # Then add any fields from the source array that were NOT part of a mapping for field in array.dtype.names: @@ -265,27 +262,21 @@ def map_numpy_array(array: npt.NDArray, mapping: dict = {}) -> npt.NDArray: # Fill the new array for field in mapped_array.dtype.names: - if field in mapping: - # Mapped field: stack the source columns - val_list = mapping[field] - if len(val_list) == 1: - mapped_array[field] = array[val_list[0]] - else: - # Stack columns horizontally for each row - # We need to extract each column, then stack them along axis 1 - cols = [array[val] for val in val_list] - mapped_array[field] = np.stack(cols, axis=1) + # Mapped field: stack the source columns + val_list = mapping[field] + if len(val_list) == 1: + mapped_array[field] = array[val_list[0]] else: - # Direct copy - mapped_array[field] = array[field] + # Stack columns horizontally for each row + # We need to extract each column, then stack them along axis 1 + cols = [array[val] for val in val_list] + mapped_array[field] = np.stack(cols, axis=1) return mapped_array def np_to_list_dicts(array: npt.NDArray, mapping: dict = {}) -> List[dict]: """Convert numpy structured array to list of dicts""" - if array is None: - return None out = [] for row in array: diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index abff32ac35..ee0ebd65cf 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -51,7 +51,20 @@ def shutdown(self) -> None: def run(self, calc_in: npt.NDArray, Work: dict) -> (npt.NDArray, dict, int | None): if Work["persis_info"] is None: Work["persis_info"] = {} - return self._result(calc_in, Work["persis_info"], Work["libE_info"]) + 
out = self._result(calc_in, Work["persis_info"], Work["libE_info"]) + + # Help users who mixed up sim_f and simulator parameters + if isinstance(out, (tuple, list)): + calc_out = out[0] + else: + calc_out = out + + if isinstance(calc_out, dict): + raise AttributeError( + "Manager received a dictionary from a simulation. " + "Perhaps you meant to set `SimSpecs.simulator` instead of `SimSpecs.sim_f`?" + ) + return out class GlobusComputeRunner(Runner): From ebce988dc050d7359903d40464a8be44d4e316b9 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 20 Feb 2026 14:04:41 -0600 Subject: [PATCH 7/7] adjusts for MPI case --- .../tests/regression_tests/test_asktell_aposmm_nlopt.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py index 4b03d7678b..cbcd4c22e0 100644 --- a/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py +++ b/libensemble/tests/regression_tests/test_asktell_aposmm_nlopt.py @@ -106,6 +106,8 @@ def six_hump_camel_func(x): sim_f=six_hump_camel_func, vocs=vocs ) # wrong parameter, but check we get error message workflow.exit_criteria = ExitCriteria(sim_max=200) + return_flag = False + workflow.libE_specs.abort_on_exception = False workflow.add_random_streams() @@ -114,8 +116,12 @@ def six_hump_camel_func(x): except Exception as e: assert isinstance(e, LoggedException) aposmm.finalize() + return_flag = False continue + if run == 2 and workflow.is_manager: + assert return_flag + # Perform the run if workflow.is_manager and run == 0: print("[Manager]:", H[np.where(H["local_min"])]["x"])
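For reference, the field-mapping round trip that patches 2 and 3 build (map_numpy_array / unmap_numpy_array, plus the "_id" <-> "sim_id" rename) boils down to stacking flat, VOCS-named history fields into one multidimensional generator field and splitting it back out on the way to the manager. A self-contained sketch of that transformation under an assumed mapping -- the mapping, field names, and values below are illustrative, not the functions added in the patch:

import numpy as np

# Assumed gen-side field built from several flat H fields (in the style of variables_mapping above).
mapping = {"x": ["core", "edge"]}

# History-style array with flat, VOCS-named fields.
H = np.zeros(2, dtype=[("core", float), ("edge", float), ("sim_id", int)])
H["core"], H["edge"], H["sim_id"] = [0.0898, -0.0898], [-0.7126, 0.7126], [0, 1]

# "map": stack the flat columns into a single (2,)-shaped "x" field for the generator.
gen_in = np.zeros(len(H), dtype=[("x", float, (2,)), ("sim_id", int)])
gen_in["x"] = np.stack([H[name] for name in mapping["x"]], axis=1)
gen_in["sim_id"] = H["sim_id"]

# "unmap": split the multidimensional field back out into the flat H fields.
H_back = np.zeros(len(gen_in), dtype=H.dtype)
for i, name in enumerate(mapping["x"]):
    H_back[name] = gen_in["x"][:, i]
H_back["sim_id"] = gen_in["sim_id"]

assert np.allclose(H_back["core"], H["core"]) and np.allclose(H_back["edge"], H["edge"])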