From 6a513fa6f881f30f48c8ebaff1d48ac8ff07cc65 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 30 Jun 2025 16:49:02 +0000 Subject: [PATCH 001/244] start of general decode --- pynapple/process/__init__.py | 2 +- pynapple/process/decoding.py | 183 ++++++++++++++++++++++++++++ tests/test_decode_general.py | 225 +++++++++++++++++++++++++++++++++++ 3 files changed, 409 insertions(+), 1 deletion(-) create mode 100644 tests/test_decode_general.py diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 3893822c5..117c540c2 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -4,7 +4,7 @@ compute_eventcorrelogram, compute_isi_distribution, ) -from .decoding import decode_1d, decode_2d +from .decoding import decode, decode_1d, decode_2d from .filtering import ( apply_bandpass_filter, apply_bandstop_filter, diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index e730138b5..430e4403a 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -262,3 +262,186 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N ) return decoded, p + + +def decode( + tuning_curves, tuning_curve_bins, group, ep, bin_size, time_units="s", features=None +): + """ + Performs Bayesian decoding over n-dimensional features. + + See: + Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. + (1998). Interpreting neuronal population activity by + reconstruction: unified framework with application to + hippocampal place cells. Journal of neurophysiology, 79(2), + 1017-1044. + + Parameters + ---------- + tuning_curves : dict + Dictionary of tuning curves (one for each neuron). + tuning_curve_bins : list + List of lists containing the bin positions for each dimension of the tuning curves. + group : TsGroup, TsdFrame or dict of Ts/Tsd object. + A group of neurons with the same keys as tuning_curves dictionary. + You may also pass a TsdFrame with smoothed rates (recommended). 
+ ep : IntervalSet + The epoch on which decoding is computed + bin_size : float + Bin size. Default is second. Use the parameter time_units to change it. + time_units : str, optional + Time unit of the bin size ('s' [default], 'ms', 'us'). + features : TsdFrame + The features used to compute the tuning curves. + Used to correct for occupancy. + If feature is not passed, the occupancy is uniform. + + Returns + ------- + Tsd + The decoded feature + numpy.ndarray + The probability distribution of the decoded trajectory for each time bin + + Raises + ------ + RuntimeError + If group is not a dict of Ts/Tsd or TsGroup. + If different size of neurons for tuning_curves and group. + If indexes don't match between tuning_curves and group. + + """ + + if type(group) is nap.TsdFrame: + newgroup = group.restrict(ep) + numcells = newgroup.shape[1] + + if len(tuning_curves) != numcells: + raise RuntimeError("Different shapes for tuning_curves and group") + + if not np.all( + np.array(list(tuning_curves.keys())) == np.array(newgroup.columns) + ): + raise RuntimeError("Different indices for tuning curves and group keys") + + count = group + + elif type(group) is nap.TsGroup: + newgroup = group.restrict(ep) + numcells = len(newgroup) + + if len(tuning_curves) != numcells: + raise RuntimeError("Different shapes for tuning_curves and group") + + if not np.all( + np.array(list(tuning_curves.keys())) == np.array(newgroup.keys()) + ): + raise RuntimeError("Different indices for tuning curves and group keys") + + count = newgroup.count(bin_size, ep, time_units) + + elif type(group) is dict: + newgroup = nap.TsGroup(group, time_support=ep) + count = newgroup.count(bin_size, ep, time_units) + + else: + raise RuntimeError("Unknown format for group") + + if not isinstance(tuning_curve_bins, list | np.ndarray): + raise RuntimeError( + "tuning_curve_bins should be a list or array of feature bins." 
+ ) + tuning_curve_bins = np.array(tuning_curve_bins) + if len(tuning_curve_bins) != list(tuning_curves.values())[0].ndim: + raise RuntimeError("Tuning curve shape and tuning curve bins do not match.") + + indexes = list(tuning_curves.keys()) + + # Occupancy + if features is None: + occupancy = np.ones_like(tuning_curves[indexes[0]]).flatten() + elif isinstance(features, nap.TsdFrame | nap.Tsd): + if isinstance(features, nap.Tsd): + features = nap.TsdFrame(t=features.times(), d=features.values) + if features.ndim == 1: + features = features[:, np.newaxis] + if len(tuning_curve_bins) != features.shape[1]: + raise RuntimeError("Number of features and tuning_curve_bins do not match.") + + bins = [] + for i in range(len(tuning_curve_bins)): + diff = np.diff(tuning_curve_bins[i]) + _bins = tuning_curve_bins[i][:-1] - diff / 2 + _bins = np.hstack( + (_bins, [_bins[-1] + diff[-1], _bins[-1] + 2 * diff[-1]]) + ) # assuming the size of the last 2 bins is equal + bins.append(_bins) + + occupancy, _ = np.histogramdd(features, bins) + occupancy = occupancy.flatten() + else: + raise RuntimeError("Features should be a TsdFrame.") + + # Transforming to pure numpy array + tc = np.array([tuning_curves[i] for i in tuning_curves.keys()]) + tc = tc.reshape(tc.shape[0], np.prod(tc.shape[1:])) + tc = tc.T + ct = count.values + bin_size_s = nap.TsIndex.format_timestamps( + np.array([bin_size], dtype=np.float64), time_units + )[0] + + p1 = np.exp(-bin_size_s * np.nansum(tc, 1)) + p2 = occupancy / occupancy.sum() + + ct2 = np.tile(ct[:, np.newaxis, :], (1, tc.shape[0], 1)) + + p3 = np.nanprod(tc**ct2, -1) + + p = p1 * p2 * p3 + p = p / p.sum(1)[:, np.newaxis] + + idxmax = np.argmax(p, 1) + + n_bins_per_feature = [ + tuning_curve_bins[i].shape[0] for i in range(len(tuning_curve_bins)) + ] + p = p.reshape( + p.shape[0], + *n_bins_per_feature, + ) + p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( + t=count.index, + d=p, + time_support=ep, + ) + + idxmax = 
np.unravel_index(idxmax, n_bins_per_feature) + + if features is not None: + cols = features.columns + else: + cols = np.arange(len(tuning_curve_bins)) + + if len(tuning_curve_bins) == 1: + decoded = nap.Tsd( + t=count.index, + d=tuning_curve_bins[0][idxmax[0]], + time_support=ep, + ) + else: + decoded = nap.TsdFrame( + t=count.index, + d=np.stack( + [ + tuning_curve_bins[i][idxmax[i]] + for i in range(len(tuning_curve_bins)) + ], + axis=1, + ), + time_support=ep, + columns=cols, + ) + + return decoded, p diff --git a/tests/test_decode_general.py b/tests/test_decode_general.py new file mode 100644 index 000000000..0e1cc732f --- /dev/null +++ b/tests/test_decode_general.py @@ -0,0 +1,225 @@ +import numpy as np +import pytest + +import pynapple as nap + + +def get_testing_set_1d(): + feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) + group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) + tc = nap.compute_1d_tuning_curves( + group=group, feature=feature, nb_bins=2, minmax=(-0.5, 1.5) + ) + tc_bins = tc.index.values[None, :] + tc = {i: tc[i] for i in tc.columns} + ep = nap.IntervalSet(start=0, end=100) + return feature, group, tc, tc_bins, ep + + +def test_decode_1d(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + decoded, proba = nap.decode(tc, tc_bins, group, ep, bin_size=1) + assert isinstance(decoded, nap.Tsd) + assert isinstance(proba, nap.TsdFrame) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.ones((100, 2)) + tmp[50:, 0] = 0.0 + tmp[0:50, 1] = 0.0 + np.testing.assert_array_almost_equal(proba.values, tmp) + + +def test_decode_1d_with_TsdFrame(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + count = group.count(bin_size=1, ep=ep) + decoded, proba = nap.decode(tc, tc_bins, count, ep, bin_size=1) + assert isinstance(decoded, nap.Tsd) + assert isinstance(proba, nap.TsdFrame) + 
np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.ones((100, 2)) + tmp[50:, 0] = 0.0 + tmp[0:50, 1] = 0.0 + np.testing.assert_array_almost_equal(proba.values, tmp) + + +def test_decode_1d_with_feature(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + decoded, proba = nap.decode(tc, tc_bins, group, ep, bin_size=1, features=feature) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert isinstance(decoded, nap.Tsd) + assert isinstance(proba, nap.TsdFrame) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.ones((100, 2)) + tmp[50:, 0] = 0.0 + tmp[0:50, 1] = 0.0 + np.testing.assert_array_almost_equal(proba.values, tmp) + + +def test_decode_1d_with_dict(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + group = dict(group) + decoded, proba = nap.decode(tc, tc_bins, group, ep, bin_size=1, features=feature) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert isinstance(decoded, nap.Tsd) + assert isinstance(proba, nap.TsdFrame) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.ones((100, 2)) + tmp[50:, 0] = 0.0 + tmp[0:50, 1] = 0.0 + np.testing.assert_array_almost_equal(proba.values, tmp) + + +def test_decode_1d_with_wrong_feature(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + with pytest.raises(RuntimeError) as e_info: + nap.decode(tc, tc_bins, group, ep, bin_size=1, features=[1, 2, 3]) + assert str(e_info.value) == "Features should be a TsdFrame." 
+ + +def test_decode_1d_with_time_units(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): + decoded, proba = nap.decode(tc, tc_bins, group, ep, 1.0 * t, time_units=tu) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + + +def test_decoded_1d_raise_errors(): + feature, group, tc, tc_bins, ep = get_testing_set_1d() + with pytest.raises(Exception) as e_info: + nap.decode(tc, tc_bins, np.random.rand(10), ep, 1) + assert str(e_info.value) == "Unknown format for group" + + tc[2] = np.random.rand(2) + with pytest.raises(Exception) as e_info: + nap.decode(tc, tc_bins, group, ep, 1) + assert str(e_info.value) == "Different shapes for tuning_curves and group" + + feature, group, tc, tc_bins, ep = get_testing_set_1d() + tc = {k: values for k, values in zip(list(tc.keys())[::-1], tc.values())} + with pytest.raises(Exception) as e_info: + nap.decode(tc, tc_bins, group, ep, 1) + assert str(e_info.value) == "Different indices for tuning curves and group keys" + + +# +# +# def get_testing_set_2d(): +# features = nap.TsdFrame( +# t=np.arange(0, 100, 1), +# d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, +# ) +# group = nap.TsGroup( +# { +# 0: nap.Ts(np.arange(0, 50, 2)), +# 1: nap.Ts(np.arange(1, 51, 2)), +# 2: nap.Ts(np.arange(50, 100, 2)), +# 3: nap.Ts(np.arange(51, 101, 2)), +# } +# ) +# +# tc, xy = nap.compute_2d_tuning_curves( +# group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) +# ) +# ep = nap.IntervalSet(start=0, end=100) +# return features, group, tc, ep, tuple(xy) +# +# +# def test_decode_2d(): +# features, group, tc, ep, xy = get_testing_set_2d() +# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) +# +# assert isinstance(decoded, nap.TsdFrame) +# assert isinstance(proba, np.ndarray) +# np.testing.assert_array_almost_equal(features.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = 
np.zeros((100, 2)) +# tmp[0:50:2, 0] = 1 +# tmp[50:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) +# +# tmp = np.zeros((100, 2)) +# tmp[1:50:2, 0] = 1 +# tmp[51:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +# +# +# def test_decode_2d_with_TsdFrame(): +# features, group, tc, ep, xy = get_testing_set_2d() +# count = group.count(bin_size=1, ep=ep) +# decoded, proba = nap.decode_2d(tc, count, ep, 1, xy) +# +# assert isinstance(decoded, nap.TsdFrame) +# assert isinstance(proba, np.ndarray) +# np.testing.assert_array_almost_equal(features.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = np.zeros((100, 2)) +# tmp[0:50:2, 0] = 1 +# tmp[50:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) +# +# tmp = np.zeros((100, 2)) +# tmp[1:50:2, 0] = 1 +# tmp[51:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +# +# +# def test_decode_2d_with_dict(): +# features, group, tc, ep, xy = get_testing_set_2d() +# group = dict(group) +# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) +# +# assert isinstance(decoded, nap.TsdFrame) +# assert isinstance(proba, np.ndarray) +# np.testing.assert_array_almost_equal(features.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = np.zeros((100, 2)) +# tmp[0:50:2, 0] = 1 +# tmp[50:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) +# +# tmp = np.zeros((100, 2)) +# tmp[1:50:2, 0] = 1 +# tmp[51:100:2, 1] = 1 +# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +# +# +# def test_decode_2d_with_feature(): +# features, group, tc, ep, xy = get_testing_set_2d() +# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) +# np.testing.assert_array_almost_equal(features.values, decoded.values) +# +# +# def test_decode_2d_with_time_units(): +# features, group, tc, ep, xy = get_testing_set_2d() +# for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): 
+# decoded, proba = nap.decode_2d(tc, group, ep, 1.0 * t, xy, time_units=tu) +# np.testing.assert_array_almost_equal(features.values, decoded.values) +# +# +# def test_decoded_2d_raise_errors(): +# features, group, tc, ep, xy = get_testing_set_2d() +# with pytest.raises(Exception) as e_info: +# nap.decode_2d(tc, np.random.rand(10), ep, 1, xy) +# assert str(e_info.value) == "Unknown format for group" +# +# features, group, tc, ep, xy = get_testing_set_2d() +# tc[5] = np.random.rand(2, 2) +# with pytest.raises(Exception) as e_info: +# nap.decode_2d(tc, group, ep, 1, xy) +# assert str(e_info.value) == "Different shapes for tuning_curves and group" +# +# features, group, tc, ep, xy = get_testing_set_2d() +# tc = {k: tc[i] for k, i in zip(np.arange(0, 40, 10), tc.keys())} +# with pytest.raises(Exception) as e_info: +# nap.decode_2d(tc, group, ep, 1, xy) +# assert str(e_info.value) == "Different indices for tuning curves and group keys" From b760a8c4ccb0593e2d78f18d7d93d51ae0b3e4f9 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 30 Jun 2025 17:11:52 +0000 Subject: [PATCH 002/244] general decoding tests --- tests/test_decode_general.py | 208 +++++++++++++++-------------------- 1 file changed, 91 insertions(+), 117 deletions(-) diff --git a/tests/test_decode_general.py b/tests/test_decode_general.py index 0e1cc732f..212149db4 100644 --- a/tests/test_decode_general.py +++ b/tests/test_decode_general.py @@ -83,14 +83,14 @@ def test_decode_1d_with_wrong_feature(): assert str(e_info.value) == "Features should be a TsdFrame." 
-def test_decode_1d_with_time_units(): +def test_decode_with_time_units(): feature, group, tc, tc_bins, ep = get_testing_set_1d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): decoded, proba = nap.decode(tc, tc_bins, group, ep, 1.0 * t, time_units=tu) np.testing.assert_array_almost_equal(feature.values, decoded.values) -def test_decoded_1d_raise_errors(): +def test_decoded_raise_errors(): feature, group, tc, tc_bins, ep = get_testing_set_1d() with pytest.raises(Exception) as e_info: nap.decode(tc, tc_bins, np.random.rand(10), ep, 1) @@ -108,118 +108,92 @@ def test_decoded_1d_raise_errors(): assert str(e_info.value) == "Different indices for tuning curves and group keys" -# -# -# def get_testing_set_2d(): -# features = nap.TsdFrame( -# t=np.arange(0, 100, 1), -# d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, -# ) -# group = nap.TsGroup( -# { -# 0: nap.Ts(np.arange(0, 50, 2)), -# 1: nap.Ts(np.arange(1, 51, 2)), -# 2: nap.Ts(np.arange(50, 100, 2)), -# 3: nap.Ts(np.arange(51, 101, 2)), -# } -# ) -# -# tc, xy = nap.compute_2d_tuning_curves( -# group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) -# ) -# ep = nap.IntervalSet(start=0, end=100) -# return features, group, tc, ep, tuple(xy) -# -# -# def test_decode_2d(): -# features, group, tc, ep, xy = get_testing_set_2d() -# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) -# -# assert isinstance(decoded, nap.TsdFrame) -# assert isinstance(proba, np.ndarray) -# np.testing.assert_array_almost_equal(features.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.zeros((100, 2)) -# tmp[0:50:2, 0] = 1 -# tmp[50:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) -# -# tmp = np.zeros((100, 2)) -# tmp[1:50:2, 0] = 1 -# tmp[51:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) -# -# -# def test_decode_2d_with_TsdFrame(): -# features, group, tc, ep, xy = get_testing_set_2d() -# count = 
group.count(bin_size=1, ep=ep) -# decoded, proba = nap.decode_2d(tc, count, ep, 1, xy) -# -# assert isinstance(decoded, nap.TsdFrame) -# assert isinstance(proba, np.ndarray) -# np.testing.assert_array_almost_equal(features.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.zeros((100, 2)) -# tmp[0:50:2, 0] = 1 -# tmp[50:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) -# -# tmp = np.zeros((100, 2)) -# tmp[1:50:2, 0] = 1 -# tmp[51:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) -# -# -# def test_decode_2d_with_dict(): -# features, group, tc, ep, xy = get_testing_set_2d() -# group = dict(group) -# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) -# -# assert isinstance(decoded, nap.TsdFrame) -# assert isinstance(proba, np.ndarray) -# np.testing.assert_array_almost_equal(features.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.zeros((100, 2)) -# tmp[0:50:2, 0] = 1 -# tmp[50:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) -# -# tmp = np.zeros((100, 2)) -# tmp[1:50:2, 0] = 1 -# tmp[51:100:2, 1] = 1 -# np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) -# -# -# def test_decode_2d_with_feature(): -# features, group, tc, ep, xy = get_testing_set_2d() -# decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) -# np.testing.assert_array_almost_equal(features.values, decoded.values) -# -# -# def test_decode_2d_with_time_units(): -# features, group, tc, ep, xy = get_testing_set_2d() -# for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): -# decoded, proba = nap.decode_2d(tc, group, ep, 1.0 * t, xy, time_units=tu) -# np.testing.assert_array_almost_equal(features.values, decoded.values) -# -# -# def test_decoded_2d_raise_errors(): -# features, group, tc, ep, xy = get_testing_set_2d() -# with pytest.raises(Exception) as e_info: -# nap.decode_2d(tc, np.random.rand(10), ep, 1, xy) -# assert str(e_info.value) 
== "Unknown format for group" -# -# features, group, tc, ep, xy = get_testing_set_2d() -# tc[5] = np.random.rand(2, 2) -# with pytest.raises(Exception) as e_info: -# nap.decode_2d(tc, group, ep, 1, xy) -# assert str(e_info.value) == "Different shapes for tuning_curves and group" -# -# features, group, tc, ep, xy = get_testing_set_2d() -# tc = {k: tc[i] for k, i in zip(np.arange(0, 40, 10), tc.keys())} -# with pytest.raises(Exception) as e_info: -# nap.decode_2d(tc, group, ep, 1, xy) -# assert str(e_info.value) == "Different indices for tuning curves and group keys" +def get_testing_set_2d(): + features = nap.TsdFrame( + t=np.arange(0, 100, 1), + d=np.vstack( + (np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50)) + ).T.astype(np.float64), + ) + group = nap.TsGroup( + { + 0: nap.Ts(np.arange(0, 50, 2)), + 1: nap.Ts(np.arange(1, 51, 2)), + 2: nap.Ts(np.arange(50, 100, 2)), + 3: nap.Ts(np.arange(51, 101, 2)), + } + ) + + tc, tc_bins = nap.compute_2d_tuning_curves( + group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) + ) + ep = nap.IntervalSet(start=0, end=100) + return features, group, tc, tc_bins, ep + + +def test_decode_2d(): + features, group, tc, tc_bins, ep = get_testing_set_2d() + decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) + + assert isinstance(decoded, nap.TsdFrame) + assert isinstance(proba, nap.TsdTensor) + np.testing.assert_array_almost_equal(features.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.zeros((100, 2)) + tmp[0:50:2, 0] = 1 + tmp[50:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) + + tmp = np.zeros((100, 2)) + tmp[1:50:2, 0] = 1 + tmp[51:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) + + +def test_decode_2d_with_TsdFrame(): + features, group, tc, tc_bins, ep = get_testing_set_2d() + count = group.count(bin_size=1, ep=ep) + decoded, proba = nap.decode(tc, tc_bins, count, ep, 1) + + assert isinstance(decoded, 
nap.TsdFrame) + assert isinstance(proba, nap.TsdTensor) + np.testing.assert_array_almost_equal(features.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.zeros((100, 2)) + tmp[0:50:2, 0] = 1 + tmp[50:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) + + tmp = np.zeros((100, 2)) + tmp[1:50:2, 0] = 1 + tmp[51:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) + + +def test_decode_2d_with_dict(): + features, group, tc, tc_bins, ep = get_testing_set_2d() + group = dict(group) + decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) + + assert isinstance(decoded, nap.TsdFrame) + assert isinstance(proba, nap.TsdTensor) + np.testing.assert_array_almost_equal(features.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.zeros((100, 2)) + tmp[0:50:2, 0] = 1 + tmp[50:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) + + tmp = np.zeros((100, 2)) + tmp[1:50:2, 0] = 1 + tmp[51:100:2, 1] = 1 + np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) + + +def test_decode_2d_with_feature(): + features, group, tc, tc_bins, ep = get_testing_set_2d() + decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) + np.testing.assert_array_almost_equal(features.values, decoded.values) From 1abad762ebb496d312ee5374b5a2951aefe35a5a Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 1 Jul 2025 18:03:04 +0000 Subject: [PATCH 003/244] start of general tuning curve computatio --- pynapple/process/__init__.py | 1 + pynapple/process/tuning_curves.py | 109 ++++++++++++++++++++++ tests/test_tuning_curves_general.py | 136 ++++++++++++++++++++++++++++ 3 files changed, 246 insertions(+) create mode 100644 tests/test_tuning_curves_general.py diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 3893822c5..e4f63e8c3 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -36,6 +36,7 @@ 
compute_2d_tuning_curves, compute_2d_tuning_curves_continuous, compute_discrete_tuning_curves, + compute_tuning_curves, ) from .warping import build_tensor, warp_tensor from .wavelets import compute_wavelet_transform, generate_morlet_filterbank diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index fba5600b2..7db9a8758 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -81,6 +81,115 @@ def wrapper(*args, **kwargs): return wrapper +def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): + """ + Computes n-dimensional tuning curves relative to n features. + + Parameters + ---------- + group : TsGroup, TsdFrame or dict of Ts/Tsd object. + The group of Ts/Tsd for which the tuning curves will be computed + You may also pass a TsdFrame with smoothed rates (recommended). + features : TsdFrame + The features (i.e. one column per feature). + num_bins : int or list + Number of bins in the tuning curves (can be separate for each feature dimension, if list provided) + epoch : IntervalSet, optional + The epoch on which tuning curves are computed. + If None, the epoch is the time support of the feature. + bounds : list, optional + The min and max boundaries of the tuning curves given as: + [[min_x1, min_x2, ...], [max_x1, max_x2, ...]] + If None, the boundaries are inferred from the target features + + Returns + ------- + tuple + A tuple containing: \n + tc (dict): Dictionary of the tuning curves.\n + bin_centers (list): List of bins center for each dimension + + Raises + ------ + ValueError + If num_bins is a list with a different length than the number of feature dimensions. + If bounds is not of length 2 or if the lengths of mins and maxs do not match the number of feature dimensions. 
+ + """ + + # test group + # if isinstance(group, nap.TsdFrame): + # group = group.restrict(ep) + # elif isinstance(group, nap.TsGroup): + # group = group.restrict(ep) + # elif isinstance(group, dict): + # group = nap.TsGroup(group, time_support=ep) + # else: + # raise TypeError("Unknown format for group") + + # test features + if isinstance(features, nap.Tsd): + features = nap.TsdFrame( + d=features, t=features.times(), ep=features.time_support + ) + elif not isinstance(features, nap.TsdFrame): + raise TypeError("feature should be a Tsd or TsdFrame") + + # test num_bins + if isinstance(num_bins, list): + if len(num_bins) != features.shape[1]: + raise ValueError( + "If num_bins is a list, it should have the same length as the number of feature dimensions." + ) + elif not isinstance(num_bins, int): + raise TypeError( + "num_bins should be of type int or list with length equal to number of feature dimensions." + ) + else: + num_bins = [num_bins] * features.shape[1] + num_dims = features.shape[1] + + # test minmax + if bounds is not None: + if len(bounds) != 2: + raise ValueError( + "bounds should be of length 2, containing mins and maxs for each feature." + ) + if len(bounds[0]) != features.shape[1] or len(bounds[1]) != features.shape[1]: + raise ValueError( + "bounds should have the same length as the number of feature dimensions." 
+ ) + + # test ep + if epoch is None: + epoch = features.time_support + else: + features = features.restrict(epoch) + + # Occupancy + if bounds is None: + occupancy, bin_edges = np.histogramdd(features.values, bins=num_bins) + else: + bin_edges = [ + np.linspace(low, high, n + 1) + for low, high, n in zip(bounds[0], bounds[1], num_bins, strict=True) + ] + occupancy, _ = np.histogramdd(features.values, bins=bin_edges) + occupancy[occupancy == 0] = np.nan # avoid /0 + + # Tuning curves + tcs = {} + group_vals = {d: group.value_from(features[:, d], epoch) for d in range(num_dims)} + for n in group.keys(): + data = np.column_stack( + [group_vals[d][n].values.flatten() for d in range(num_dims)] + ) + count, _ = np.histogramdd(data, bins=bin_edges) + tcs[n] = (count / occupancy) * features.rate + + return tcs, [e[:-1] + np.diff(e) / 2 for e in bin_edges] + + @_validate_tuning_inputs def compute_discrete_tuning_curves(group, dict_ep): """ diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py new file mode 100644 index 000000000..fd0c0eac0 --- /dev/null +++ b/tests/test_tuning_curves_general.py @@ -0,0 +1,136 @@ +"""Tests of tuning curves for `pynapple` package.""" + +import itertools +from contextlib import nullcontext as does_not_raise + +import numpy as np +import pytest + +import pynapple as nap + + +def get_group( + n_units: int = 2, duration: float = 100.0, mean_rate_hz: float = 5.0 +) -> nap.TsGroup: + units = {} + for k in range(n_units): + n_spikes = np.random.poisson(mean_rate_hz * duration) + spike_times = np.random.uniform(0.0, duration, size=n_spikes) + spike_times.sort() + units[k] = nap.Ts(t=spike_times) + + return nap.TsGroup(units) + + +def get_features(num_dims: int, duration: float = 100.0, dt: float = 0.1): + t = np.arange(0.0, duration, dt) + + # Saw‑tooth features, each phase‑shifted so they differ + data = np.column_stack([(t + i / num_dims) % 1.0 for i in range(num_dims)]) + + # Wrap in a TsdFrame with a matching 
time_support + return nap.TsdFrame(t=t, d=data, time_support=nap.IntervalSet(0.0, duration)) + + +@pytest.mark.parametrize( + "num_dims, num_bins", + [ + (num_dims, num_bins) + for num_dims in range(1, 4) + for num_bins in ( + [1, 5, 10] + + [ + list(tup) + for tup in itertools.product([1, 5, 10], repeat=num_dims) + if num_dims > 1 + ] + ) + ], +) +@pytest.mark.parametrize("bounds_alpha", [None, 0.0, 0.2]) +@pytest.mark.parametrize( + "epoch", + [ + None, + nap.IntervalSet(0.0, 50.0), + nap.IntervalSet(0.0, 100.0), + nap.IntervalSet(0.0, 200.0), + nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), + ], +) +def test_compute_tuning_curves(num_dims, num_bins, bounds_alpha, epoch): + group = get_group() + features = get_features(num_dims) + + if bounds_alpha is None: + bounds = None + else: + full_min = np.nanmin(features.values, axis=0) + full_max = np.nanmax(features.values, axis=0) + span = full_max - full_min + bounds = np.vstack( + [full_min + bounds_alpha * span, full_max - bounds_alpha * span] + ) # shape (2, num_dims) + + # ------------------------------------------------------------------ + # compute actual + # ------------------------------------------------------------------ + tcs, tc_bins = nap.compute_tuning_curves( + group=group, + features=features, + num_bins=num_bins, + bounds=bounds, + epoch=epoch, + ) + + # ------------------------------------------------------------------ + # compute expected + # ------------------------------------------------------------------ + _features = features if epoch is None else features.restrict(epoch) + _num_bins = [num_bins] * num_dims if isinstance(num_bins, int) else num_bins + + # build edges identical to what the function *should* have used + if bounds is None: + occupancy, bin_edges = np.histogramdd(_features.values, bins=_num_bins) + else: + bin_edges = [ + np.linspace(low, high, n + 1) + for low, high, n in zip(bounds[0], bounds[1], _num_bins, strict=True) + ] + occupancy, _ = np.histogramdd(_features.values, 
bins=bin_edges) + occupancy[occupancy == 0] = np.nan # avoid /0 + + # tuning curves + expected_tcs = {} + group_vals = {d: group.value_from(_features[:, d], epoch) for d in range(num_dims)} + for k in group.keys(): + spike_feat = np.column_stack( + [group_vals[d][k].values.flatten() for d in range(num_dims)] + ) + counts, _ = np.histogramdd(spike_feat, bins=bin_edges) + expected_tcs[k] = (counts / occupancy) * _features.rate + + # expected bin centres + expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] + + # ------------------------------------------------------------------ + # test + # ------------------------------------------------------------------ + assert isinstance(tcs, dict) + assert len(tcs) == len(expected_tcs) == len(group) + for (key, tc), (expected_key, expected_tc) in zip( + tcs.items(), expected_tcs.items() + ): + assert key == expected_key + assert isinstance(tc, np.ndarray) + assert tc.ndim == num_dims + assert tc.shape == tuple(_num_bins) + np.testing.assert_almost_equal(tc, expected_tc) + + assert isinstance(tc_bins, list) + assert len(tc_bins) == len(expected_tc_bins) == num_dims + for bins, expected_bins, expected_size in zip(tc_bins, expected_tc_bins, _num_bins): + assert isinstance(bins, np.ndarray) + assert bins.ndim == 1 + assert bins.size == expected_size + np.testing.assert_allclose(bins, expected_bins) From 1b4c3de489eab1bb106c6f1700f269075af6a6c1 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 1 Jul 2025 20:49:16 +0000 Subject: [PATCH 004/244] continuous addition --- pynapple/process/tuning_curves.py | 41 ++++++++++++++++++++--------- tests/test_tuning_curves_general.py | 20 +++++++------- 2 files changed, 40 insertions(+), 21 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 7db9a8758..6db2c6cad 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -127,7 +127,7 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, 
bounds=None): # else: # raise TypeError("Unknown format for group") - # test features + # check features if isinstance(features, nap.Tsd): features = nap.TsdFrame( d=features, t=features.times(), ep=features.time_support @@ -135,7 +135,7 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): elif not isinstance(features, nap.TsdFrame): raise TypeError("feature should be a Tsd or TsdFrame") - # test num_bins + # check num_bins if isinstance(num_bins, list): if len(num_bins) != features.shape[1]: raise ValueError( @@ -149,7 +149,7 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): num_bins = [num_bins] * features.shape[1] num_dims = features.shape[1] - # test minmax + # check minmax if bounds is not None: if len(bounds) != 2: raise ValueError( @@ -160,7 +160,7 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): "bounds should have the same length as the number of feature dimensions." ) - # test ep + # check ep if epoch is None: epoch = features.time_support else: @@ -175,17 +175,34 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): for low, high, n in zip(bounds[0], bounds[1], num_bins, strict=True) ] occupancy, _ = np.histogramdd(features.values, bins=bin_edges) - occupancy[occupancy == 0] = np.nan # avoid /0 + occupancy[occupancy == 0] = np.nan # Tuning curves - tcs = {} group_vals = {d: group.value_from(features[:, d], epoch) for d in range(num_dims)} - for n in group.keys(): - data = np.column_stack( - [group_vals[d][n].values.flatten() for d in range(num_dims)] - ) - count, _ = np.histogramdd(data, bins=bin_edges) - tcs[n] = (count / occupancy) * features.rate + if isinstance(group, nap.TsGroup): + tcs = {} + for n in group.keys(): + data = np.column_stack( + [group_vals[d][n].values.flatten() for d in range(num_dims)] + ) + count, _ = np.histogramdd(data, bins=bin_edges) + tcs[n] = (count / occupancy) * features.rate + else: + idxs = [ + 
np.clip( + np.digitize(group_vals[d].values, bin_edges[d]) - 1, 0, num_bins[d] - 1 + ) + for d in range(num_dims) + ] + flat = np.ravel_multi_index(tuple(idxs), num_bins) + flat_bins = np.prod(num_bins) + sums = np.zeros((flat_bins, group.shape[1])) + counts = np.zeros(flat_bins, dtype=int) + np.add.at(sums, flat, group.values) + np.add.at(counts, flat, 1) + means = sums / counts[:, None] + tcs = means.reshape((*num_bins, group.shape[1])).transpose(-1, *range(num_dims)) + tcs[:, occupancy == np.nan] = np.nan return tcs, [e[:-1] + np.diff(e) / 2 for e in bin_edges] diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index fd0c0eac0..bef683932 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -52,14 +52,16 @@ def get_features(num_dims: int, duration: float = 100.0, dt: float = 0.1): "epoch", [ None, - nap.IntervalSet(0.0, 50.0), - nap.IntervalSet(0.0, 100.0), - nap.IntervalSet(0.0, 200.0), - nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), + # nap.IntervalSet(0.0, 50.0), + # nap.IntervalSet(0.0, 100.0), + # nap.IntervalSet(0.0, 200.0), + # nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), ], ) -def test_compute_tuning_curves(num_dims, num_bins, bounds_alpha, epoch): - group = get_group() +@pytest.mark.parametrize("continuous", [True]) +def test_compute_tuning_curves(continuous, num_dims, num_bins, bounds_alpha, epoch): + _group = get_group() + group = _group.count(0.1) if continuous else _group features = get_features(num_dims) if bounds_alpha is None: @@ -102,8 +104,8 @@ def test_compute_tuning_curves(num_dims, num_bins, bounds_alpha, epoch): # tuning curves expected_tcs = {} - group_vals = {d: group.value_from(_features[:, d], epoch) for d in range(num_dims)} - for k in group.keys(): + group_vals = {d: _group.value_from(_features[:, d], epoch) for d in range(num_dims)} + for k in _group.keys(): spike_feat = np.column_stack( [group_vals[d][k].values.flatten() for d in range(num_dims)] ) @@ -117,7 
+119,7 @@ def test_compute_tuning_curves(num_dims, num_bins, bounds_alpha, epoch): # test # ------------------------------------------------------------------ assert isinstance(tcs, dict) - assert len(tcs) == len(expected_tcs) == len(group) + assert len(tcs) == len(expected_tcs) == len(_group) for (key, tc), (expected_key, expected_tc) in zip( tcs.items(), expected_tcs.items() ): From 29a4ad8b3bccd491b166a15b912fd393604c6847 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 2 Jul 2025 20:50:16 +0000 Subject: [PATCH 005/244] changing inputs to bins&range, matching numpy --- pynapple/process/tuning_curves.py | 140 ++++++++++----------------- tests/test_tuning_curves_general.py | 144 ++++++++++++++++------------ 2 files changed, 133 insertions(+), 151 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 6db2c6cad..e60c5dabe 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -10,6 +10,7 @@ import numpy as np import pandas as pd +from scipy.stats import binned_statistic_dd from .. import core as nap @@ -81,7 +82,7 @@ def wrapper(*args, **kwargs): return wrapper -def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): +def compute_tuning_curves(group, features, bins, range=None, epochs=None): """ Computes n-dimensional tuning curves relative to n features. @@ -89,18 +90,25 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): ---------- group : TsGroup, TsdFrame or dict of Ts/Tsd object. The group of Ts/Tsd for which the tuning curves will be computed - You may also pass a TsdFrame with smoothed rates (recommended). - features : TsdFrame + features : Tsd/TsdFrame The features (i.e. one column per feature). - num_bins : int or list - Number of bins in the tuning curves (can be separate for each feature dimension, if list provided) - epoch : IntervalSet, optional - The epoch on which tuning curves are computed. 
- If None, the epoch is the time support of the feature. - bounds : list, optional - The min and max boundaries of the tuning curves given as: - [[min_x1, min_x2, ...], [max_x1, max_x2, ...]] - If None, the boundaries are inferred from the target features + bins : sequence or int + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + range : sequence, optional + A sequence of entries per feature, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + epochs : IntervalSet, optional + The epochs on which tuning curves are computed. + If None, the epochs are the time support of the features. Returns ------- @@ -109,100 +117,54 @@ def compute_tuning_curves(group, features, num_bins, epoch=None, bounds=None): tc (dict): Dictionary of the tuning curves.\n bin_centers (list): List of bins center for each dimension - Raises - ------ - ValueError - If num_bins is a list with a different length than the number of feature dimensions. - If bounds is not of length 2 or if the lengths of mins and maxs do not match the number of feature dimensions. 
- """ - # test group - # if isinstance(group, nap.TsdFrame): - # group = group.restrict(ep) - # elif isinstance(group, nap.TsGroup): - # group = group.restrict(ep) - # elif isinstance(group, dict): - # group = nap.TsGroup(group, time_support=ep) - # else: - # raise TypeError("Unknown format for group") + # check group + if isinstance(group, dict): + group = nap.TsGroup(group) + elif not isinstance(group, nap.TsGroup | nap.TsdFrame): + raise TypeError("Unknown format for group") + + # check ep + if epochs is None: + epochs = features.time_support + group = group.restrict(epochs) + else: + features = features.restrict(epochs) + group = group.restrict(epochs) # check features if isinstance(features, nap.Tsd): features = nap.TsdFrame( - d=features, t=features.times(), ep=features.time_support + d=features.values, + t=features.times(), + time_support=features.time_support, ) elif not isinstance(features, nap.TsdFrame): raise TypeError("feature should be a Tsd or TsdFrame") - # check num_bins - if isinstance(num_bins, list): - if len(num_bins) != features.shape[1]: - raise ValueError( - "If num_bins is a list, it should have the same length as the number of feature dimensions." - ) - elif not isinstance(num_bins, int): - raise TypeError( - "num_bins should be of type int or list with length equal to number of feature dimensions." - ) - else: - num_bins = [num_bins] * features.shape[1] - num_dims = features.shape[1] - - # check minmax - if bounds is not None: - if len(bounds) != 2: - raise ValueError( - "bounds should be of length 2, containing mins and maxs for each feature." - ) - if len(bounds[0]) != features.shape[1] or len(bounds[1]) != features.shape[1]: - raise ValueError( - "bounds should have the same length as the number of feature dimensions." 
- ) - - # check ep - if epoch is None: - epoch = features.time_support - else: - features = features.restrict(epoch) - - # Occupancy - if bounds is None: - occupancy, bin_edges = np.histogramdd(features.values, bins=num_bins) - else: - bin_edges = [ - np.linspace(low, high, n + 1) - for low, high, n in zip(bounds[0], bounds[1], num_bins, strict=True) - ] - occupancy, _ = np.histogramdd(features.values, bins=bin_edges) + # occupancy + occupancy, bin_edges = np.histogramdd(features.values, bins=bins, range=range) occupancy[occupancy == 0] = np.nan - # Tuning curves - group_vals = {d: group.value_from(features[:, d], epoch) for d in range(num_dims)} + # tuning curves + tcs = {} if isinstance(group, nap.TsGroup): - tcs = {} for n in group.keys(): - data = np.column_stack( - [group_vals[d][n].values.flatten() for d in range(num_dims)] + count, _ = np.histogramdd( + group[n].value_from(features, epochs).values, + bins=bin_edges, ) - count, _ = np.histogramdd(data, bins=bin_edges) tcs[n] = (count / occupancy) * features.rate else: - idxs = [ - np.clip( - np.digitize(group_vals[d].values, bin_edges[d]) - 1, 0, num_bins[d] - 1 - ) - for d in range(num_dims) - ] - flat = np.ravel_multi_index(tuple(idxs), num_bins) - flat_bins = np.prod(num_bins) - sums = np.zeros((flat_bins, group.shape[1])) - counts = np.zeros(flat_bins, dtype=int) - np.add.at(sums, flat, group.values) - np.add.at(counts, flat, 1) - means = sums / counts[:, None] - tcs = means.reshape((*num_bins, group.shape[1])).transpose(-1, *range(num_dims)) - tcs[:, occupancy == np.nan] = np.nan + _tcs = binned_statistic_dd( + group.value_from(features, epochs).values, + values=group.values.T, + bins=bin_edges, + ).statistic + _tcs[:, np.isnan(occupancy)] = np.nan + for k, tc in zip(group.columns, _tcs): + tcs[k] = tc return tcs, [e[:-1] + np.diff(e) / 2 for e in bin_edges] diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index bef683932..3e1e702ba 100644 --- 
a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -5,12 +5,13 @@ import numpy as np import pytest +import scipy import pynapple as nap def get_group( - n_units: int = 2, duration: float = 100.0, mean_rate_hz: float = 5.0 + n_units: int, duration: float = 100.0, mean_rate_hz: float = 5.0 ) -> nap.TsGroup: units = {} for k in range(n_units): @@ -24,55 +25,63 @@ def get_group( def get_features(num_dims: int, duration: float = 100.0, dt: float = 0.1): t = np.arange(0.0, duration, dt) - # Saw‑tooth features, each phase‑shifted so they differ data = np.column_stack([(t + i / num_dims) % 1.0 for i in range(num_dims)]) - # Wrap in a TsdFrame with a matching time_support return nap.TsdFrame(t=t, d=data, time_support=nap.IntervalSet(0.0, duration)) @pytest.mark.parametrize( - "num_dims, num_bins", + "group", [ - (num_dims, num_bins) - for num_dims in range(1, 4) - for num_bins in ( - [1, 5, 10] - + [ - list(tup) - for tup in itertools.product([1, 5, 10], repeat=num_dims) - if num_dims > 1 - ] + group.count(0.1) if continuous else group + for continuous in [False, True] + for n_units in range(1, 4) + if (group := get_group(n_units)) + ], +) +@pytest.mark.parametrize( + "features, bins", + [ + (get_features(D), bins) + for D in range(1, 4) + for bins in ( + [2, 5, 10] + + [list(tup) for tup in itertools.product([2, 5, 10], repeat=D) if D > 1] + ) + ] + + [ + ( + nap.Tsd( + t=tsdframe.times(), + d=tsdframe.values.flatten(), + time_support=tsdframe.time_support, + ), + num_bins, ) + for num_bins in [2, 5, 10] + if (tsdframe := get_features(num_dims=1)) ], ) -@pytest.mark.parametrize("bounds_alpha", [None, 0.0, 0.2]) +@pytest.mark.parametrize("range_alpha", [None, 0.0, 0.5]) @pytest.mark.parametrize( - "epoch", + "epochs", [ None, - # nap.IntervalSet(0.0, 50.0), - # nap.IntervalSet(0.0, 100.0), - # nap.IntervalSet(0.0, 200.0), - # nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), + nap.IntervalSet(0.0, 50.0), + nap.IntervalSet(0.0, 100.0), + 
nap.IntervalSet(0.0, 200.0), + nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), ], ) -@pytest.mark.parametrize("continuous", [True]) -def test_compute_tuning_curves(continuous, num_dims, num_bins, bounds_alpha, epoch): - _group = get_group() - group = _group.count(0.1) if continuous else _group - features = get_features(num_dims) - - if bounds_alpha is None: - bounds = None +def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): + if range_alpha is None: + range = None else: full_min = np.nanmin(features.values, axis=0) full_max = np.nanmax(features.values, axis=0) span = full_max - full_min - bounds = np.vstack( - [full_min + bounds_alpha * span, full_max - bounds_alpha * span] - ) # shape (2, num_dims) + range = np.c_[full_min + range_alpha * span, full_max - range_alpha * span] # ------------------------------------------------------------------ # compute actual @@ -80,37 +89,50 @@ def test_compute_tuning_curves(continuous, num_dims, num_bins, bounds_alpha, epo tcs, tc_bins = nap.compute_tuning_curves( group=group, features=features, - num_bins=num_bins, - bounds=bounds, - epoch=epoch, + bins=bins, + range=range, + epochs=epochs, ) # ------------------------------------------------------------------ # compute expected # ------------------------------------------------------------------ - _features = features if epoch is None else features.restrict(epoch) - _num_bins = [num_bins] * num_dims if isinstance(num_bins, int) else num_bins - - # build edges identical to what the function *should* have used - if bounds is None: - occupancy, bin_edges = np.histogramdd(_features.values, bins=_num_bins) + if epochs is None: + epochs = features.time_support + group = group.restrict(epochs) else: - bin_edges = [ - np.linspace(low, high, n + 1) - for low, high, n in zip(bounds[0], bounds[1], _num_bins, strict=True) - ] - occupancy, _ = np.histogramdd(_features.values, bins=bin_edges) - occupancy[occupancy == 0] = np.nan # avoid /0 - - # tuning curves - 
expected_tcs = {} - group_vals = {d: _group.value_from(_features[:, d], epoch) for d in range(num_dims)} - for k in _group.keys(): - spike_feat = np.column_stack( - [group_vals[d][k].values.flatten() for d in range(num_dims)] + features = features.restrict(epochs) + group = group.restrict(epochs) + + if isinstance(features, nap.Tsd): + features = nap.TsdFrame( + d=features.values, + t=features.times(), + time_support=features.time_support, ) - counts, _ = np.histogramdd(spike_feat, bins=bin_edges) - expected_tcs[k] = (counts / occupancy) * _features.rate + + # Occupancy + occupancy, bin_edges = np.histogramdd(features.values, bins=bins, range=range) + occupancy[occupancy == 0] = np.nan + + # Tuning curves + expected_tcs = {} + if isinstance(group, nap.TsGroup): + for n in group.keys(): + count, _ = np.histogramdd( + group[n].value_from(features, epochs).values, + bins=bin_edges, + ) + expected_tcs[n] = (count / occupancy) * features.rate + else: + _expected_tcs = scipy.stats.binned_statistic_dd( + group.value_from(features, epochs).values, + values=group.values.T, + bins=bin_edges, + ).statistic + _expected_tcs[:, np.isnan(occupancy)] = np.nan + for k, tc in zip(group.columns, _expected_tcs): + expected_tcs[k] = tc # expected bin centres expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] @@ -119,20 +141,18 @@ def test_compute_tuning_curves(continuous, num_dims, num_bins, bounds_alpha, epo # test # ------------------------------------------------------------------ assert isinstance(tcs, dict) - assert len(tcs) == len(expected_tcs) == len(_group) + assert len(tcs) == len(expected_tcs) for (key, tc), (expected_key, expected_tc) in zip( tcs.items(), expected_tcs.items() ): assert key == expected_key assert isinstance(tc, np.ndarray) - assert tc.ndim == num_dims - assert tc.shape == tuple(_num_bins) - np.testing.assert_almost_equal(tc, expected_tc) + assert tc.shape == expected_tc.shape + np.testing.assert_allclose(tc, expected_tc) assert 
isinstance(tc_bins, list) - assert len(tc_bins) == len(expected_tc_bins) == num_dims - for bins, expected_bins, expected_size in zip(tc_bins, expected_tc_bins, _num_bins): + assert len(tc_bins) == len(expected_tc_bins) + for bins, expected_bins in zip(tc_bins, expected_tc_bins): assert isinstance(bins, np.ndarray) - assert bins.ndim == 1 - assert bins.size == expected_size + assert bins.shape == expected_bins.shape np.testing.assert_allclose(bins, expected_bins) From 8c0af2c782fcf0998f497a735f413431829584bd Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 3 Jul 2025 19:42:25 +0000 Subject: [PATCH 006/244] tests for general tuning curve + wrappers for the old function --- pynapple/process/tuning_curves.py | 481 +++++++++------------------- pyproject.toml | 3 +- tests/test_tuning_curves.py | 241 -------------- tests/test_tuning_curves_general.py | 194 +++++++++-- 4 files changed, 307 insertions(+), 612 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index e60c5dabe..2fd39a716 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -10,6 +10,7 @@ import numpy as np import pandas as pd +import xarray as xr from scipy.stats import binned_statistic_dd from .. import core as nap @@ -82,7 +83,7 @@ def wrapper(*args, **kwargs): return wrapper -def compute_tuning_curves(group, features, bins, range=None, epochs=None): +def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs=None): """ Computes n-dimensional tuning curves relative to n features. @@ -109,29 +110,27 @@ def compute_tuning_curves(group, features, bins, range=None, epochs=None): epochs : IntervalSet, optional The epochs on which tuning curves are computed. If None, the epochs are the time support of the features. + fs : float, optional + The exact sampling frequency of the features used to normalise the tuning curves. + Unit should match that of the features. If not passed, it is estimated. 
Returns ------- - tuple - A tuple containing: \n - tc (dict): Dictionary of the tuning curves.\n - bin_centers (list): List of bins center for each dimension - + xarray + An xarray DataArray containing the tuning curves with labeled dimensions. """ # check group if isinstance(group, dict): group = nap.TsGroup(group) + if isinstance(group, nap.Tsd): + group = nap.TsdFrame( + d=group.values, + t=group.times(), + time_support=group.time_support, + ) elif not isinstance(group, nap.TsGroup | nap.TsdFrame): - raise TypeError("Unknown format for group") - - # check ep - if epochs is None: - epochs = features.time_support - group = group.restrict(epochs) - else: - features = features.restrict(epochs) - group = group.restrict(epochs) + raise TypeError("group should be a Tsd, TsdFrame, TsGroup, or dict.") # check features if isinstance(features, nap.Tsd): @@ -141,32 +140,151 @@ def compute_tuning_curves(group, features, bins, range=None, epochs=None): time_support=features.time_support, ) elif not isinstance(features, nap.TsdFrame): - raise TypeError("feature should be a Tsd or TsdFrame") + raise TypeError("features should be a Tsd or TsdFrame.") + + # check ep + if epochs is None: + epochs = features.time_support + elif isinstance(epochs, nap.IntervalSet): + if features.time_support.tot_length() < epochs.tot_length(): + warnings.warn( + "The passed epochs are larger than the time support of the features," + "this will artificially increase the outer bins of the tuning curves.", + UserWarning, + ) + features = features.restrict(epochs) + else: + raise TypeError("epochs should be an IntervalSet.") + group = group.restrict(epochs) + + # check rate + if fs is None: + fs = np.mean(features.time_diff(epochs=epochs).values) + else: + if not isinstance(fs, (int, float)): + raise TypeError("fs should be a number (int or float)") # occupancy - occupancy, bin_edges = np.histogramdd(features.values, bins=bins, range=range) + occupancy, bin_edges = np.histogramdd(features, bins=bins, 
range=range) occupancy[occupancy == 0] = np.nan # tuning curves - tcs = {} if isinstance(group, nap.TsGroup): - for n in group.keys(): + tcs = np.zeros([len(group), *occupancy.shape]) + for i, n in enumerate(group): count, _ = np.histogramdd( group[n].value_from(features, epochs).values, bins=bin_edges, ) - tcs[n] = (count / occupancy) * features.rate + tcs[i] = (count / occupancy) * fs else: - _tcs = binned_statistic_dd( + tcs = binned_statistic_dd( group.value_from(features, epochs).values, values=group.values.T, bins=bin_edges, ).statistic - _tcs[:, np.isnan(occupancy)] = np.nan - for k, tc in zip(group.columns, _tcs): - tcs[k] = tc + tcs[:, np.isnan(occupancy)] = np.nan + + return xr.DataArray( + tcs, + name="tuning curves", + coords={ + "unit": group.keys() if isinstance(group, nap.TsGroup) else group.columns, + **{ + (f"f{feature}" if isinstance(feature, int) else feature): e[:-1] + + np.diff(e) / 2 + for feature, e in zip(features.columns, bin_edges) + }, + }, + ) + - return tcs, [e[:-1] + np.diff(e) / 2 for e in bin_edges] +def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): + warnings.warn( + "compute_1d_tuning_curves is deprecated and will be removed in v1.0; " + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + group, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + return pd.DataFrame( + xarray.values.T, + index=xarray.coords["f0"].values, + columns=xarray.coords["unit"].values, + ) + + +def compute_1d_tuning_curves_continuous( + tsdframe, feature, nb_bins, ep=None, minmax=None +): + warnings.warn( + "compute_1d_tuning_curves_continuous is deprecated and will be removed in v1.0; " + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + tsdframe, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + return pd.DataFrame( + 
xarray.values.T, + index=xarray.coords["f0"].values, + columns=xarray.coords["unit"].values, + ) + + +def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): + warnings.warn( + "compute_2d_tuning_curves is deprecated and will be removed in v1.0; " + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + group, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins + + +def compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, ep=None, minmax=None +): + warnings.warn( + "compute_2d_tuning_curves_continuous is deprecated and will be removed in v1.0; " + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + tsdframe, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins @_validate_tuning_inputs @@ -220,148 +338,6 @@ def compute_discrete_tuning_curves(group, dict_ep): return tuning_curves -@_validate_tuning_inputs -def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): - """ - Computes 1-dimensional tuning curves relative to a 1d feature. - - Parameters - ---------- - group : TsGroup - The group of Ts/Tsd for which the tuning curves will be computed - feature : Tsd (or TsdFrame with 1 column only) - The 1-dimensional target feature (e.g. head-direction) - nb_bins : int - Number of bins in the tuning curve - ep : IntervalSet, optional - The epoch on which tuning curves are computed. 
- If None, the epoch is the time support of the feature. - minmax : tuple or list, optional - The min and max boundaries of the tuning curves. - If None, the boundaries are inferred from the target feature - - Returns - ------- - pandas.DataFrame - DataFrame to hold the tuning curves - - Raises - ------ - RuntimeError - If group is not a TsGroup object. - - """ - if minmax is not None and len(minmax) != 2: - raise ValueError("minmax should be of length 2.") - if ep is None: - ep = feature.time_support - - if minmax is None: - bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins + 1) - else: - bins = np.linspace(minmax[0], minmax[1], nb_bins + 1) - - idx = bins[0:-1] + np.diff(bins) / 2 - - tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys())) - - group_value = group.value_from(feature, ep) - - occupancy, _ = np.histogram(feature.restrict(ep).values, bins) - - for k in group_value: - count, _ = np.histogram(group_value[k].values, bins) - count = count / occupancy - tuning_curves[k] = count - tuning_curves[k] = count * feature.rate - - return tuning_curves - - -@_validate_tuning_inputs -def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): - """ - Computes 2-dimensional tuning curves relative to a 2d features - - Parameters - ---------- - group : TsGroup - The group of Ts/Tsd for which the tuning curves will be computed - features : TsdFrame - The 2d features (i.e. 2 columns features). - nb_bins : int or tuple - Number of bins in the tuning curves (separate for 2 feature dimensions if tuple provided) - ep : IntervalSet, optional - The epoch on which tuning curves are computed. - If None, the epoch is the time support of the feature. 
- minmax : tuple or list, optional - The min and max boundaries of the tuning curves given as: - (minx, maxx, miny, maxy) - If None, the boundaries are inferred from the target features - - Returns - ------- - tuple - A tuple containing: \n - tc (dict): Dictionary of the tuning curves with dimensions (nb_bins, nb_bins).\n - xy (list): List of bins center in the two dimensions - - Raises - ------ - RuntimeError - If group is not a TsGroup object or if features is not 2 columns only. - - """ - if minmax is not None and len(minmax) != 4: - raise ValueError("minmax should be of length 4.") - - if isinstance(nb_bins, tuple) and len(nb_bins) != 2: - raise ValueError( - "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." - ) - - if isinstance(nb_bins, int): - nb_bins = (nb_bins, nb_bins) - - if ep is None: - ep = features.time_support - else: - features = features.restrict(ep) - - groups_value = {} - binsxy = {} - - for i in range(2): - groups_value[i] = group.value_from(features[:, i], ep) - if minmax is None: - bins = np.linspace( - np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] + 1 - ) - else: - bins = np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i] + 1) - binsxy[i] = bins - - occupancy, _, _ = np.histogram2d( - features[:, 0].values.flatten(), - features[:, 1].values.flatten(), - [binsxy[0], binsxy[1]], - ) - - tc = {} - for n in group.keys(): - count, _, _ = np.histogram2d( - groups_value[0][n].values.flatten(), - groups_value[1][n].values.flatten(), - [binsxy[0], binsxy[1]], - ) - count = count / occupancy - tc[n] = count * features.rate - - xy = [binsxy[i][0:-1] + np.diff(binsxy[i]) / 2 for i in range(2)] - - return tc, xy - - @_validate_tuning_inputs def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): """ @@ -512,174 +488,3 @@ def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=Fals SI = SI / fr[:, 0, 0] SI = pd.DataFrame(index=idx, columns=["SI"], 
data=SI) return SI - - -@_validate_tuning_inputs -def compute_1d_tuning_curves_continuous( - tsdframe, feature, nb_bins, ep=None, minmax=None -): - """ - Computes 1-dimensional tuning curves relative to a feature with continuous data. - - Parameters - ---------- - tsdframe : Tsd or TsdFrame - Input data (e.g. continuous calcium data - where each column is the calcium activity of one neuron) - feature : Tsd (or TsdFrame with 1 column only) - The 1-dimensional target feature (e.g. head-direction) - nb_bins : int - Number of bins in the tuning curves - ep : IntervalSet, optional - The epoch on which tuning curves are computed. - If None, the epoch is the time support of the feature. - minmax : tuple or list, optional - The min and max boundaries of the tuning curves. - If None, the boundaries are inferred from the target feature - - Returns - ------- - pandas.DataFrame to hold the tuning curves - - Raises - ------ - RuntimeError - If tsdframe is not a Tsd or a TsdFrame object. - - """ - if minmax is not None and len(minmax) != 2: - raise ValueError("minmax should be of length 2.") - - feature = np.squeeze(feature) - - if isinstance(ep, nap.IntervalSet): - feature = feature.restrict(ep) - tsdframe = tsdframe.restrict(ep) - else: - tsdframe = tsdframe.restrict(feature.time_support) - - if isinstance(tsdframe, nap.Tsd): - tsdframe = tsdframe[:, np.newaxis] - - if minmax is None: - bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins + 1) - else: - bins = np.linspace(minmax[0], minmax[1], nb_bins + 1) - - align_times = tsdframe.value_from(feature) - idx = np.digitize(align_times.values, bins) - 1 - - tc = np.zeros((len(bins) - 1, tsdframe.shape[1])) - for i in range(0, nb_bins): - tc[i] = np.mean(tsdframe.values[idx == i], axis=0) - tc[np.isnan(tc)] = 0.0 - - # Assigning nans if bin is not visited. 
- occupancy, _ = np.histogram(feature, bins) - tc[occupancy == 0.0] = np.nan - - tc = pd.DataFrame( - index=bins[0:-1] + np.diff(bins) / 2, data=tc, columns=tsdframe.columns - ) - return tc - - -@_validate_tuning_inputs -def compute_2d_tuning_curves_continuous( - tsdframe, features, nb_bins, ep=None, minmax=None -): - """ - Computes 2-dimensional tuning curves relative to a 2d feature with continuous data. - - Parameters - ---------- - tsdframe : Tsd or TsdFrame - Input data (e.g. continuous calcium data - where each column is the calcium activity of one neuron) - features : TsdFrame - The 2d feature (two columns) - nb_bins : int or tuple - Number of bins in the tuning curves (separate for 2 feature dimensions if tuple provided) - ep : IntervalSet, optional - The epoch on which tuning curves are computed. - If None, the epoch is the time support of the feature. - minmax : tuple or list, optional - The min and max boundaries of the tuning curves. - Should be a tuple of minx, maxx, miny, maxy - If None, the boundaries are inferred from the target feature - - Returns - ------- - tuple - A tuple containing: \n - tc (dict): Dictionary of the tuning curves with dimensions (nb_bins, nb_bins).\n - xy (list): List of bins center in the two dimensions - - Raises - ------ - RuntimeError - If tsdframe is not a Tsd/TsdFrame or if features is not 2 columns - - """ - if minmax is not None and len(minmax) != 4: - raise ValueError("minmax should be of length 4.") - - if isinstance(nb_bins, tuple) and len(nb_bins) != 2: - raise ValueError( - "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." 
- ) - - if isinstance(ep, nap.IntervalSet): - features = features.restrict(ep) - tsdframe = tsdframe.restrict(ep) - else: - tsdframe = tsdframe.restrict(features.time_support) - - if isinstance(tsdframe, nap.Tsd): - tsdframe = tsdframe[:, np.newaxis] - - if isinstance(nb_bins, int): - nb_bins = (nb_bins, nb_bins) - - binsxy = [] - idxs = [] - - for i in range(2): - if minmax is None: - bins = np.linspace( - np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] + 1 - ) - else: - bins = np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i] + 1) - - align_times = tsdframe.value_from(features[:, i], ep) - idxs.append(np.digitize(align_times.values.flatten(), bins) - 1) - binsxy.append(bins) - - idxs = np.transpose(np.array(idxs)) - - tc = np.zeros((tsdframe.shape[1], nb_bins[0], nb_bins[1])) - - for i in range(nb_bins[0]): - for j in range(nb_bins[1]): - tc[:, i, j] = np.mean( - tsdframe.values[np.logical_and(idxs[:, 0] == i, idxs[:, 1] == j)], 0 - ) - - tc[np.isnan(tc)] = 0.0 - - # Assigning nans if bin is not visited. 
- occupancy, _, _ = np.histogram2d( - features[:, 0].values.flatten(), - features[:, 1].values.flatten(), - [binsxy[0], binsxy[1]], - ) - occupancy = occupancy[np.newaxis, :, :] - occupancy = np.repeat(occupancy, len(tc), axis=0) - tc[occupancy == 0.0] = np.nan - - xy = [binsxy[i][0:-1] + np.diff(binsxy[i]) / 2 for i in range(2)] - - tc = {c: tc[i] for i, c in enumerate(tsdframe.columns)} - - return tc, xy diff --git a/pyproject.toml b/pyproject.toml index d8c2cff06..86a716fc7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,8 @@ dependencies = [ "pynwb>=2.0.0", "tabulate", "h5py", - "rich" + "rich", + "xarray>=2023.1.0", ] requires-python = ">=3.8" diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index bb54891a2..8359c3f88 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -75,82 +75,6 @@ def test_compute_discrete_tuning_curves_errors(group, dict_ep, expected_exceptio nap.compute_discrete_tuning_curves(group, dict_ep) -@pytest.mark.parametrize( - "group, feature, nb_bins, ep, minmax, expected_exception", - [ - ("a", get_feature(), 10, get_ep(), (0, 1), "group should be a TsGroup."), - ( - get_group(), - "a", - 10, - get_ep(), - (0, 1), - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", - ), - ( - get_group(), - get_feature(), - "a", - get_ep(), - (0, 1), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - (get_group(), get_feature(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - ( - get_group(), - get_feature(), - 10, - get_ep(), - 1, - r"minmax should be a tuple\/list of 2 numbers", - ), - ], -) -def test_compute_1d_tuning_curves_errors( - group, feature, nb_bins, ep, minmax, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): - nap.compute_1d_tuning_curves(group, feature, nb_bins, ep, minmax) - - -@pytest.mark.parametrize( - "group, features, nb_bins, ep, minmax, expected_exception", - [ - ("a", 
get_features(), 10, get_ep(), (0, 1), "group should be a TsGroup."), - ( - get_group(), - "a", - 10, - get_ep(), - (0, 1), - r"features should be a TsdFrame with 2 columns", - ), - ( - get_group(), - get_features(), - "a", - get_ep(), - (0, 1), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - (get_group(), get_features(), 10, "a", (0, 1), r"ep should be an IntervalSet"), - ( - get_group(), - get_features(), - 10, - get_ep(), - 1, - r"minmax should be a tuple\/list of 2 numbers", - ), - ], -) -def test_compute_2d_tuning_curves_errors( - group, features, nb_bins, ep, minmax, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): - nap.compute_2d_tuning_curves(group, features, nb_bins, ep, minmax) - - @pytest.mark.parametrize( "tc, feature, ep, minmax, bitssec, expected_exception", [ @@ -255,171 +179,6 @@ def test_compute_2d_mutual_info_errors( nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) -@pytest.mark.parametrize( - "tsdframe, feature, nb_bins, ep, minmax, expected_exception", - [ - ( - "a", - get_feature(), - 10, - get_ep(), - (0, 1), - "Argument tsdframe should be of type Tsd or TsdFrame.", - ), - ( - get_tsdframe(), - "a", - 10, - get_ep(), - (0, 1), - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", - ), - ( - get_tsdframe(), - get_feature(), - "a", - get_ep(), - (0, 1), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - ( - get_tsdframe(), - get_feature(), - 10, - "a", - (0, 1), - r"ep should be an IntervalSet", - ), - ( - get_tsdframe(), - get_feature(), - 10, - get_ep(), - 1, - r"minmax should be a tuple\/list of 2 numbers", - ), - ], -) -def test_compute_1d_tuning_curves_continuous_errors( - tsdframe, feature, nb_bins, ep, minmax, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): - nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, ep, minmax) - - 
-@pytest.mark.parametrize( - "tsdframe, features, nb_bins, ep, minmax, expected_exception", - [ - ( - "a", - get_features(), - 10, - get_ep(), - (0, 1), - "Argument tsdframe should be of type Tsd or TsdFrame.", - ), - ( - get_tsdframe(), - "a", - 10, - get_ep(), - (0, 1), - r"features should be a TsdFrame with 2 columns", - ), - ( - get_tsdframe(), - get_features(), - "a", - get_ep(), - (0, 1), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - ( - get_tsdframe(), - get_features(), - 10, - "a", - (0, 1), - r"ep should be an IntervalSet", - ), - ( - get_tsdframe(), - get_features(), - 10, - get_ep(), - 1, - r"minmax should be a tuple\/list of 2 numbers", - ), - ], -) -def test_compute_2d_tuning_curves_continuous_errors( - tsdframe, features, nb_bins, ep, minmax, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): - nap.compute_2d_tuning_curves_continuous(tsdframe, features, nb_bins, ep, minmax) - - -######################## -# ValueError test -######################## -@pytest.mark.parametrize( - "func, args, minmax, expected", - [ - ( - nap.compute_1d_tuning_curves, - (get_group(), get_feature(), 10), - (0, 1, 2), - "minmax should be of length 2.", - ), - ( - nap.compute_1d_tuning_curves_continuous, - (get_tsdframe(), get_feature(), 10), - (0, 1, 2), - "minmax should be of length 2.", - ), - ( - nap.compute_2d_tuning_curves, - (get_group(), get_features(), 10), - (0, 1, 2), - "minmax should be of length 4.", - ), - ( - nap.compute_2d_tuning_curves_continuous, - (get_tsdframe(), get_features(), 10), - (0, 1, 2), - "minmax should be of length 4.", - ), - ( - nap.compute_2d_tuning_curves, - (get_group(), nap.TsdFrame(t=np.arange(10), d=np.ones((10, 3))), 10), - (0, 1), - "features should have 2 columns only.", - ), - ( - nap.compute_1d_tuning_curves, - (get_group(), nap.TsdFrame(t=np.arange(10), d=np.ones((10, 3))), 10), - (0, 1), - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", 
- ), - ( - nap.compute_2d_tuning_curves, - (get_group(), get_features(), (0, 1, 2)), - (0, 1, 2, 3), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - ( - nap.compute_2d_tuning_curves_continuous, - (get_tsdframe(), get_features(), (0, 1, 2)), - (0, 1, 2, 3), - r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", - ), - ], -) -def test_compute_tuning_curves_value_error(func, args, minmax, expected): - with pytest.raises(ValueError, match=expected): - func(*args, minmax=minmax) - - ######################## # Normal test ######################## diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index 3e1e702ba..bd7eef41d 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -6,15 +6,16 @@ import numpy as np import pytest import scipy +import xarray as xr import pynapple as nap def get_group( - n_units: int, duration: float = 100.0, mean_rate_hz: float = 5.0 + num_units: int, duration: float = 100.0, mean_rate_hz: float = 5.0 ) -> nap.TsGroup: units = {} - for k in range(n_units): + for k in range(num_units): n_spikes = np.random.poisson(mean_rate_hz * duration) spike_times = np.random.uniform(0.0, duration, size=n_spikes) spike_times.sort() @@ -28,7 +29,142 @@ def get_features(num_dims: int, duration: float = 100.0, dt: float = 0.1): # Saw‑tooth features, each phase‑shifted so they differ data = np.column_stack([(t + i / num_dims) % 1.0 for i in range(num_dims)]) # Wrap in a TsdFrame with a matching time_support - return nap.TsdFrame(t=t, d=data, time_support=nap.IntervalSet(0.0, duration)) + return nap.TsdFrame( + t=t, + d=data, + time_support=nap.IntervalSet(0.0, duration), + columns=[f"col{i}" for i in range(num_dims)], + ) + + +@pytest.mark.parametrize( + "group, features, kwargs, expectation", + [ + # group + ( + [1], + get_features(1), + {}, + pytest.raises( + TypeError, match="group should be a Tsd, 
TsdFrame, TsGroup, or dict." + ), + ), + ( + None, + get_features(1), + {}, + pytest.raises( + TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." + ), + ), + (get_group(1), get_features(1), {}, does_not_raise()), + (get_group(3), get_features(1), {}, does_not_raise()), + (get_group(1).count(0.1), get_features(1), {}, does_not_raise()), + (get_group(3).count(0.1), get_features(1), {}, does_not_raise()), + (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), get_features(1), {}, does_not_raise()), + ({1: nap.Ts([1, 2, 3])}, get_features(1), {}, does_not_raise()), + ( + {1: nap.Ts([1, 2, 3]), 2: nap.Ts([1, 2, 3])}, + get_features(1), + {}, + does_not_raise(), + ), + # features + ( + get_group(1), + [1], + {}, + pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), + ), + ( + get_group(1), + None, + {}, + pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), + ), + ( + get_group(1), + nap.Tsd(d=[1, 1, 1], t=[1, 2, 3]), + {}, + does_not_raise(), + ), + ( + get_group(1), + get_features(3), + {}, + does_not_raise(), + ), + # epochs + ( + get_group(1), + get_features(1), + {"epochs": 1}, + pytest.raises(TypeError, match="epochs should be an IntervalSet."), + ), + ( + get_group(1), + get_features(1), + {"epochs": [1, 2]}, + pytest.raises(TypeError, match="epochs should be an IntervalSet."), + ), + ( + get_group(1), + get_features(1), + {"epochs": None}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet(0.0, 50.0)}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet([0.0, 30.0], [10.0, 50.0])}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet([0.0, 1000.0])}, + pytest.warns( + UserWarning, + match="The passed epochs are larger than the time support of the features," + "this will artificially increase the outer bins of the tuning curves.", + ), + ), + # fs + ( + get_group(1), + get_features(1), + {"fs": "1"}, + 
pytest.raises(TypeError, match="fs should be a number"), + ), + ( + get_group(1), + get_features(1), + {"fs": []}, + pytest.raises(TypeError, match="fs should be a number"), + ), + ( + get_group(1), + get_features(1), + {"fs": 1}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"fs": 1.0}, + does_not_raise(), + ), + ], +) +def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): + with expectation: + nap.compute_tuning_curves(group, features, **kwargs) @pytest.mark.parametrize( @@ -86,7 +222,7 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): # ------------------------------------------------------------------ # compute actual # ------------------------------------------------------------------ - tcs, tc_bins = nap.compute_tuning_curves( + tcs = nap.compute_tuning_curves( group=group, features=features, bins=bins, @@ -99,40 +235,38 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): # ------------------------------------------------------------------ if epochs is None: epochs = features.time_support - group = group.restrict(epochs) else: features = features.restrict(epochs) - group = group.restrict(epochs) + group = group.restrict(epochs) if isinstance(features, nap.Tsd): features = nap.TsdFrame( d=features.values, t=features.times(), time_support=features.time_support, + columns=["f0"], ) - # Occupancy - occupancy, bin_edges = np.histogramdd(features.values, bins=bins, range=range) + # occupancy + occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) occupancy[occupancy == 0] = np.nan - # Tuning curves - expected_tcs = {} + # tuning curves if isinstance(group, nap.TsGroup): - for n in group.keys(): + expected_tcs = np.zeros([len(group), *occupancy.shape]) + for i, n in enumerate(group.keys()): count, _ = np.histogramdd( group[n].value_from(features, epochs).values, bins=bin_edges, ) - expected_tcs[n] = (count / occupancy) * features.rate + 
expected_tcs[i] = (count / occupancy) * 0.1 else: - _expected_tcs = scipy.stats.binned_statistic_dd( + expected_tcs = scipy.stats.binned_statistic_dd( group.value_from(features, epochs).values, values=group.values.T, bins=bin_edges, ).statistic - _expected_tcs[:, np.isnan(occupancy)] = np.nan - for k, tc in zip(group.columns, _expected_tcs): - expected_tcs[k] = tc + expected_tcs[:, np.isnan(occupancy)] = np.nan # expected bin centres expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] @@ -140,19 +274,15 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): # ------------------------------------------------------------------ # test # ------------------------------------------------------------------ - assert isinstance(tcs, dict) - assert len(tcs) == len(expected_tcs) - for (key, tc), (expected_key, expected_tc) in zip( - tcs.items(), expected_tcs.items() - ): - assert key == expected_key - assert isinstance(tc, np.ndarray) - assert tc.shape == expected_tc.shape - np.testing.assert_allclose(tc, expected_tc) - - assert isinstance(tc_bins, list) - assert len(tc_bins) == len(expected_tc_bins) - for bins, expected_bins in zip(tc_bins, expected_tc_bins): - assert isinstance(bins, np.ndarray) - assert bins.shape == expected_bins.shape - np.testing.assert_allclose(bins, expected_bins) + + # values + assert isinstance(tcs, xr.DataArray) + np.testing.assert_allclose(tcs, expected_tcs) + + # labels + units = group.keys() if isinstance(group, nap.TsGroup) else group.columns + assert "unit" in tcs.coords + assert np.all(tcs.coords["unit"] == units) + for dim, (dim_label, bins) in enumerate(list(tcs.coords.items())[1:]): + assert dim_label == features.columns[dim] + np.testing.assert_allclose(bins, expected_tc_bins[dim]) From 6bf94ee49cec1b19a089036b3eb0b500c49cd91b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 3 Jul 2025 20:11:56 +0000 Subject: [PATCH 007/244] 1/dt = fs --- pynapple/process/tuning_curves.py | 4 ++-- 
tests/test_tuning_curves.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 2fd39a716..cb55a24aa 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -142,7 +142,7 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= elif not isinstance(features, nap.TsdFrame): raise TypeError("features should be a Tsd or TsdFrame.") - # check ep + # check epochs if epochs is None: epochs = features.time_support elif isinstance(epochs, nap.IntervalSet): @@ -159,7 +159,7 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= # check rate if fs is None: - fs = np.mean(features.time_diff(epochs=epochs).values) + fs = 1 / np.mean(features.time_diff(epochs=epochs).values) else: if not isinstance(fs, (int, float)): raise TypeError("fs should be a number (int or float)") diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 8359c3f88..a755e6555 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -250,7 +250,7 @@ def test_compute_1d_tuning_curves(args, kwargs, expected): ( (get_group(), get_features(), 10), {"ep": nap.IntervalSet(0, 400)}, - np.ones((10, 10)) * 0.25, + np.ones((10, 10)) * 0.5, ), ( (get_group(), get_features(), 10), From 18b0868f5396845746f9eff8e59a9ebe6e19d597 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 3 Jul 2025 21:57:06 +0000 Subject: [PATCH 008/244] fix tests note as to why I changed some original tests: the old compute_2d_tuning_curves_continuous used np.digitize, which does not include the last edge the new implementation does, so I had to reflect that in the expected values --- pynapple/process/tuning_curves.py | 8 +++++--- tests/test_tuning_curves.py | 8 ++++---- tests/test_tuning_curves_general.py | 10 ++++------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pynapple/process/tuning_curves.py 
b/pynapple/process/tuning_curves.py index cb55a24aa..1ec0236e3 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -168,21 +168,23 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) occupancy[occupancy == 0] = np.nan - # tuning curves + # tunning curves if isinstance(group, nap.TsGroup): tcs = np.zeros([len(group), *occupancy.shape]) for i, n in enumerate(group): - count, _ = np.histogramdd( + tcs[i], _ = np.histogramdd( group[n].value_from(features, epochs).values, bins=bin_edges, ) - tcs[i] = (count / occupancy) * fs + tcs = (tcs / occupancy) * fs else: + print(group.value_from(features, epochs).values) tcs = binned_statistic_dd( group.value_from(features, epochs).values, values=group.values.T, bins=bin_edges, ).statistic + tcs[np.isnan(tcs)] = 0.0 tcs[:, np.isnan(occupancy)] = np.nan return xr.DataArray( diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index a755e6555..287de1297 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -469,7 +469,7 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): if "minmax" in kwargs: tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) else: - tmp = np.linspace(np.min(args[1]), np.max(args[1]), nb_bins + 1) + tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) # Array np.testing.assert_almost_equal(tc.values, expected) @@ -495,13 +495,13 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): ), 2, {}, - {"x": np.array([[1, 0], [0, 0]]), "y": np.array([[2, 0], [0, 0]])}, + {"x": np.ones((2, 2)), "y": np.ones((2, 2)) * 2}, ), ( nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100,)) * 2))), 2, {}, - {0: np.array([[2, 0], [0, 0]])}, + {0: np.ones((2, 2)) * 2}, ), ( nap.TsdFrame( @@ -510,7 +510,7 
@@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): ), (1, 2), {}, - {0: np.array([[1.0, 0.0]]), 1: np.array([[2.0, 0.0]])}, + {0: np.array([[1.0, 1.0]]), 1: np.array([[2.0, 2.0]])}, ), ( nap.TsdFrame( diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index bd7eef41d..431f5f3d3 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -247,19 +247,17 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): columns=["f0"], ) - # occupancy + fs = 10.0 occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) - occupancy[occupancy == 0] = np.nan - - # tuning curves if isinstance(group, nap.TsGroup): + occupancy[occupancy == 0] = np.nan expected_tcs = np.zeros([len(group), *occupancy.shape]) - for i, n in enumerate(group.keys()): + for i, n in enumerate(group): count, _ = np.histogramdd( group[n].value_from(features, epochs).values, bins=bin_edges, ) - expected_tcs[i] = (count / occupancy) * 0.1 + expected_tcs[i] = (count / occupancy) * fs else: expected_tcs = scipy.stats.binned_statistic_dd( group.value_from(features, epochs).values, From 12285a6f354fa7334a2b3268f15535e3360bbf8b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 3 Jul 2025 22:09:14 +0000 Subject: [PATCH 009/244] re-add tests for deprecated functions --- pynapple/process/tuning_curves.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 1ec0236e3..bae5bf629 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -178,7 +178,6 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= ) tcs = (tcs / occupancy) * fs else: - print(group.value_from(features, epochs).values) tcs = binned_statistic_dd( group.value_from(features, epochs).values, values=group.values.T, @@ -201,6 +200,7 @@ def compute_tuning_curves(group, 
features, bins=10, range=None, epochs=None, fs= ) +@_validate_tuning_inputs def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): warnings.warn( "compute_1d_tuning_curves is deprecated and will be removed in v1.0; " @@ -222,6 +222,7 @@ def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): ) +@_validate_tuning_inputs def compute_1d_tuning_curves_continuous( tsdframe, feature, nb_bins, ep=None, minmax=None ): @@ -245,6 +246,7 @@ def compute_1d_tuning_curves_continuous( ) +@_validate_tuning_inputs def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): warnings.warn( "compute_2d_tuning_curves is deprecated and will be removed in v1.0; " @@ -266,6 +268,7 @@ def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): return tcs, bins +@_validate_tuning_inputs def compute_2d_tuning_curves_continuous( tsdframe, features, nb_bins, ep=None, minmax=None ): From b59e9cf6e6ddbd77274881b4306c90d91b745d73 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 5 Jul 2025 19:22:31 +0000 Subject: [PATCH 010/244] corrected isinstance to older way --- pynapple/process/tuning_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index bae5bf629..3ddb4b50a 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -129,7 +129,7 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= t=group.times(), time_support=group.time_support, ) - elif not isinstance(group, nap.TsGroup | nap.TsdFrame): + elif not isinstance(group, (nap.TsGroup, nap.TsdFrame)): raise TypeError("group should be a Tsd, TsdFrame, TsGroup, or dict.") # check features From 7b9583b1b419fb3ee1e783a55870ab7b23e462d7 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 7 Jul 2025 21:52:41 +0000 Subject: [PATCH 011/244] use histogramdd only --- pynapple/process/tuning_curves.py | 43 
+++++++++-------- tests/test_tuning_curves_general.py | 74 ++++++++++++++--------------- 2 files changed, 61 insertions(+), 56 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 3ddb4b50a..fbe1147f0 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -11,7 +11,6 @@ import numpy as np import pandas as pd import xarray as xr -from scipy.stats import binned_statistic_dd from .. import core as nap @@ -157,40 +156,46 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= raise TypeError("epochs should be an IntervalSet.") group = group.restrict(epochs) - # check rate + # check fs if fs is None: fs = 1 / np.mean(features.time_diff(epochs=epochs).values) - else: - if not isinstance(fs, (int, float)): - raise TypeError("fs should be a number (int or float)") + if not isinstance(fs, (int, float)): + raise TypeError("fs should be a number (int or float)") # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) occupancy[occupancy == 0] = np.nan # tunning curves + keys = group.keys() if isinstance(group, nap.TsGroup) else group.columns + tcs = np.zeros([len(keys), *occupancy.shape]) if isinstance(group, nap.TsGroup): - tcs = np.zeros([len(group), *occupancy.shape]) - for i, n in enumerate(group): - tcs[i], _ = np.histogramdd( - group[n].value_from(features, epochs).values, - bins=bin_edges, + for i, n in enumerate(keys): + tcs[i] = ( + np.histogramdd( + group[n].value_from(features, epochs), + bins=bin_edges, + )[0] + / occupancy + * fs ) - tcs = (tcs / occupancy) * fs else: - tcs = binned_statistic_dd( - group.value_from(features, epochs).values, - values=group.values.T, - bins=bin_edges, - ).statistic - tcs[np.isnan(tcs)] = 0.0 - tcs[:, np.isnan(occupancy)] = np.nan + values = group.value_from(features, epochs) + for i, n in enumerate(keys): + tcs[i] = ( + np.histogramdd( + values, + weights=group.values[:, i], + bins=bin_edges, + 
)[0] + / occupancy + ) return xr.DataArray( tcs, name="tuning curves", coords={ - "unit": group.keys() if isinstance(group, nap.TsGroup) else group.columns, + "unit": keys, **{ (f"f{feature}" if isinstance(feature, int) else feature): e[:-1] + np.diff(e) / 2 diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index 431f5f3d3..f6665a2e2 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -5,35 +5,23 @@ import numpy as np import pytest -import scipy import xarray as xr import pynapple as nap -def get_group( - num_units: int, duration: float = 100.0, mean_rate_hz: float = 5.0 -) -> nap.TsGroup: - units = {} - for k in range(num_units): - n_spikes = np.random.poisson(mean_rate_hz * duration) - spike_times = np.random.uniform(0.0, duration, size=n_spikes) - spike_times.sort() - units[k] = nap.Ts(t=spike_times) - - return nap.TsGroup(units) +def get_group(n): + return nap.TsGroup( + {i: nap.Ts(t=np.arange(0, 200, 10**i)) for i in range(-1, n - 1)} + ) -def get_features(num_dims: int, duration: float = 100.0, dt: float = 0.1): - t = np.arange(0.0, duration, dt) - # Saw‑tooth features, each phase‑shifted so they differ - data = np.column_stack([(t + i / num_dims) % 1.0 for i in range(num_dims)]) - # Wrap in a TsdFrame with a matching time_support +def get_features(n, fs=10.0): return nap.TsdFrame( - t=t, - d=data, - time_support=nap.IntervalSet(0.0, duration), - columns=[f"col{i}" for i in range(num_dims)], + t=np.arange(0, 200, 1 / fs), + d=np.stack([np.arange(0, 200, 1 / fs) % i for i in range(1, n + 1)], axis=1), + time_support=nap.IntervalSet(0, 200), + columns=[f"f{i}" for i in range(1, n + 1)], ) @@ -167,6 +155,7 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) nap.compute_tuning_curves(group, features, **kwargs) +@pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( "group", [ @@ -177,14 +166,15 @@ def 
test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) ], ) @pytest.mark.parametrize( - "features, bins", + "features, bins, fs", [ - (get_features(D), bins) + (get_features(D, fs=10.0 if fs is None else fs), bins, fs) for D in range(1, 4) for bins in ( [2, 5, 10] + [list(tup) for tup in itertools.product([2, 5, 10], repeat=D) if D > 1] ) + for fs in [None, 1.0, 10.0] ] + [ ( @@ -194,9 +184,11 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) time_support=tsdframe.time_support, ), num_bins, + fs, ) + for fs in [None, 1.0, 10.0] for num_bins in [2, 5, 10] - if (tsdframe := get_features(num_dims=1)) + if (tsdframe := get_features(1, fs=10.0 if fs is None else fs)) ], ) @pytest.mark.parametrize("range_alpha", [None, 0.0, 0.5]) @@ -210,7 +202,7 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), ], ) -def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): +def test_compute_tuning_curves(group, features, bins, range_alpha, epochs, fs): if range_alpha is None: range = None else: @@ -228,6 +220,7 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): bins=bins, range=range, epochs=epochs, + fs=fs, ) # ------------------------------------------------------------------ @@ -239,6 +232,9 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): features = features.restrict(epochs) group = group.restrict(epochs) + if fs is None: + fs = 1 / np.mean(features.time_diff(epochs=epochs)) + if isinstance(features, nap.Tsd): features = nap.TsdFrame( d=features.values, @@ -247,24 +243,29 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): columns=["f0"], ) - fs = 10.0 occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) + occupancy[occupancy == 0] = np.nan + + keys = group.keys() if isinstance(group, nap.TsGroup) else group.columns + expected_tcs 
= np.zeros([len(keys), *occupancy.shape]) if isinstance(group, nap.TsGroup): - occupancy[occupancy == 0] = np.nan - expected_tcs = np.zeros([len(group), *occupancy.shape]) - for i, n in enumerate(group): + for i, n in enumerate(keys): count, _ = np.histogramdd( group[n].value_from(features, epochs).values, bins=bin_edges, ) expected_tcs[i] = (count / occupancy) * fs else: - expected_tcs = scipy.stats.binned_statistic_dd( - group.value_from(features, epochs).values, - values=group.values.T, - bins=bin_edges, - ).statistic - expected_tcs[:, np.isnan(occupancy)] = np.nan + values = group.value_from(features, epochs) + for i, n in enumerate(keys): + expected_tcs[i] = ( + np.histogramdd( + values, + weights=group.values[:, i], + bins=bin_edges, + )[0] + / occupancy + ) # expected bin centres expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] @@ -278,9 +279,8 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs): np.testing.assert_allclose(tcs, expected_tcs) # labels - units = group.keys() if isinstance(group, nap.TsGroup) else group.columns assert "unit" in tcs.coords - assert np.all(tcs.coords["unit"] == units) + assert np.all(tcs.coords["unit"] == keys) for dim, (dim_label, bins) in enumerate(list(tcs.coords.items())[1:]): assert dim_label == features.columns[dim] np.testing.assert_allclose(bins, expected_tc_bins[dim]) From e7a14683995e71920a7f86c571df0219a151cba8 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 7 Jul 2025 21:59:33 +0000 Subject: [PATCH 012/244] cleaning --- pynapple/process/tuning_curves.py | 17 ++++++++--------- tests/test_tuning_curves_general.py | 16 +++++++--------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index fbe1147f0..1d68346ae 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -170,26 +170,25 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, 
fs= keys = group.keys() if isinstance(group, nap.TsGroup) else group.columns tcs = np.zeros([len(keys), *occupancy.shape]) if isinstance(group, nap.TsGroup): + # SPIKES for i, n in enumerate(keys): tcs[i] = ( np.histogramdd( group[n].value_from(features, epochs), bins=bin_edges, )[0] - / occupancy * fs ) else: + # RATES values = group.value_from(features, epochs) for i, n in enumerate(keys): - tcs[i] = ( - np.histogramdd( - values, - weights=group.values[:, i], - bins=bin_edges, - )[0] - / occupancy - ) + tcs[i] = np.histogramdd( + values, + weights=group.values[:, i], + bins=bin_edges, + )[0] + tcs /= occupancy return xr.DataArray( tcs, diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index f6665a2e2..287e62a7c 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -254,18 +254,16 @@ def test_compute_tuning_curves(group, features, bins, range_alpha, epochs, fs): group[n].value_from(features, epochs).values, bins=bin_edges, ) - expected_tcs[i] = (count / occupancy) * fs + expected_tcs[i] = count * fs else: values = group.value_from(features, epochs) for i, n in enumerate(keys): - expected_tcs[i] = ( - np.histogramdd( - values, - weights=group.values[:, i], - bins=bin_edges, - )[0] - / occupancy - ) + expected_tcs[i] = np.histogramdd( + values, + weights=group.values[:, i], + bins=bin_edges, + )[0] + expected_tcs /= occupancy # expected bin centres expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] From e873d2f3af05309b79d7a163eccb9955a674035a Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 8 Jul 2025 16:31:33 +0000 Subject: [PATCH 013/244] hard-coded tc tests --- tests/test_tuning_curves_general.py | 225 +++++++++++++--------------- 1 file changed, 100 insertions(+), 125 deletions(-) diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index 287e62a7c..f6c0083a1 100644 --- a/tests/test_tuning_curves_general.py +++ 
b/tests/test_tuning_curves_general.py @@ -12,16 +12,17 @@ def get_group(n): return nap.TsGroup( - {i: nap.Ts(t=np.arange(0, 200, 10**i)) for i in range(-1, n - 1)} + {i + 1: nap.Ts(t=np.arange(0, 100, 10 ** (i - 1))) for i in range(n)} ) def get_features(n, fs=10.0): return nap.TsdFrame( - t=np.arange(0, 200, 1 / fs), - d=np.stack([np.arange(0, 200, 1 / fs) % i for i in range(1, n + 1)], axis=1), - time_support=nap.IntervalSet(0, 200), - columns=[f"f{i}" for i in range(1, n + 1)], + t=np.arange(0, 100, 1 / fs), + d=np.stack( + [np.arange(0, 100, 1 / fs) % 10 * i for i in range(1, n + 1)], axis=1 + ), + columns=[f"f{i}" for i in range(n)], ) @@ -155,130 +156,104 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) nap.compute_tuning_curves(group, features, **kwargs) -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "group", - [ - group.count(0.1) if continuous else group - for continuous in [False, True] - for n_units in range(1, 4) - if (group := get_group(n_units)) - ], -) @pytest.mark.parametrize( - "features, bins, fs", + "group, features, kwargs, expected", [ - (get_features(D, fs=10.0 if fs is None else fs), bins, fs) - for D in range(1, 4) - for bins in ( - [2, 5, 10] - + [list(tup) for tup in itertools.product([2, 5, 10], repeat=D) if D > 1] - ) - for fs in [None, 1.0, 10.0] - ] - + [ + # single unit, single feature ( - nap.Tsd( - t=tsdframe.times(), - d=tsdframe.values.flatten(), - time_support=tsdframe.time_support, + get_group(1), + get_features(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, ), - num_bins, - fs, - ) - for fs in [None, 1.0, 10.0] - for num_bins in [2, 5, 10] - if (tsdframe := get_features(1, fs=10.0 if fs is None else fs)) - ], -) -@pytest.mark.parametrize("range_alpha", [None, 0.0, 0.5]) -@pytest.mark.parametrize( - "epochs", - [ - None, - nap.IntervalSet(0.0, 50.0), - nap.IntervalSet(0.0, 
100.0), - nap.IntervalSet(0.0, 200.0), - nap.IntervalSet([0.0, 40.0], [10.0, 90.0]), + ), + # multiple units, single feature + ( + get_group(2), + get_features(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "f0"], + coords={"unit": [1, 2], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple units, multiple features + ( + get_group(2), + get_features(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "f0", "f1"], + coords={ + "unit": [1, 2], + "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature, specified bins + ( + get_group(1), + get_features(1), + {"bins": 5}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, + ), + ), + # single unit, multiple features, specified bins + ( + get_group(1), + get_features(2), + {"bins": (5, 4)}, + xr.DataArray( + np.array( + [ + [ + [10.0, np.nan, np.nan, np.nan], + [10.0, 10.0, np.nan, np.nan], + [np.nan, 10.0, 10.0, np.nan], + [np.nan, np.nan, 10.0, 10.0], + [np.nan, np.nan, np.nan, 10.0], + ] + ] + ), + dims=["unit", "f0", "f1"], + coords={ + "unit": [1], + "f0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "f1": np.linspace(0, 19.8, 5)[:-1] + 2.475, + }, + ), + ), + # single unit, single feature, specified range + ( + get_group(1), + get_features(1), + {"range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), ], ) -def test_compute_tuning_curves(group, features, bins, range_alpha, epochs, fs): - if range_alpha is None: - range = None - else: - full_min = np.nanmin(features.values, axis=0) - full_max = np.nanmax(features.values, axis=0) - span = full_max - full_min - range = np.c_[full_min + range_alpha * 
span, full_max - range_alpha * span] - - # ------------------------------------------------------------------ - # compute actual - # ------------------------------------------------------------------ - tcs = nap.compute_tuning_curves( - group=group, - features=features, - bins=bins, - range=range, - epochs=epochs, - fs=fs, - ) - - # ------------------------------------------------------------------ - # compute expected - # ------------------------------------------------------------------ - if epochs is None: - epochs = features.time_support - else: - features = features.restrict(epochs) - group = group.restrict(epochs) - - if fs is None: - fs = 1 / np.mean(features.time_diff(epochs=epochs)) - - if isinstance(features, nap.Tsd): - features = nap.TsdFrame( - d=features.values, - t=features.times(), - time_support=features.time_support, - columns=["f0"], - ) - - occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) - occupancy[occupancy == 0] = np.nan - - keys = group.keys() if isinstance(group, nap.TsGroup) else group.columns - expected_tcs = np.zeros([len(keys), *occupancy.shape]) - if isinstance(group, nap.TsGroup): - for i, n in enumerate(keys): - count, _ = np.histogramdd( - group[n].value_from(features, epochs).values, - bins=bin_edges, - ) - expected_tcs[i] = count * fs - else: - values = group.value_from(features, epochs) - for i, n in enumerate(keys): - expected_tcs[i] = np.histogramdd( - values, - weights=group.values[:, i], - bins=bin_edges, - )[0] - expected_tcs /= occupancy - - # expected bin centres - expected_tc_bins = [e[:-1] + np.diff(e) / 2 for e in bin_edges] - - # ------------------------------------------------------------------ - # test - # ------------------------------------------------------------------ - - # values +def test_compute_tuning_curves(group, features, kwargs, expected): + tcs = nap.compute_tuning_curves(group, features, **kwargs) assert isinstance(tcs, xr.DataArray) - np.testing.assert_allclose(tcs, 
expected_tcs) - - # labels - assert "unit" in tcs.coords - assert np.all(tcs.coords["unit"] == keys) - for dim, (dim_label, bins) in enumerate(list(tcs.coords.items())[1:]): - assert dim_label == features.columns[dim] - np.testing.assert_allclose(bins, expected_tc_bins[dim]) + for dim in expected.coords.keys(): + assert dim in tcs.coords + np.testing.assert_allclose(tcs.coords[dim].values, expected.coords[dim].values) + np.testing.assert_allclose(tcs, expected) From 3b9905e1ec9dc9cc9fd24f7f97eb0522ae29504f Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 8 Jul 2025 22:25:03 +0000 Subject: [PATCH 014/244] fixed implementation with histgramdd + hard-coded tests also changed f0 -> feature0 --- pynapple/process/tuning_curves.py | 26 ++-- tests/test_tuning_curves.py | 1 + tests/test_tuning_curves_general.py | 213 +++++++++++++++++++++++++--- 3 files changed, 210 insertions(+), 30 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 1d68346ae..7df5d45c2 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -164,7 +164,6 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) - occupancy[occupancy == 0] = np.nan # tunning curves keys = group.keys() if isinstance(group, nap.TsGroup) else group.columns @@ -172,23 +171,26 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= if isinstance(group, nap.TsGroup): # SPIKES for i, n in enumerate(keys): - tcs[i] = ( - np.histogramdd( - group[n].value_from(features, epochs), - bins=bin_edges, - )[0] - * fs - ) + tcs[i] = np.histogramdd( + group[n].value_from(features, epochs), + bins=bin_edges, + )[0] + occupancy[occupancy == 0.0] = np.nan + tcs = (tcs / occupancy) * fs else: # RATES values = group.value_from(features, epochs) + counts = np.histogramdd(values, bins=bin_edges)[0] + counts[counts == 0] = 
np.nan for i, n in enumerate(keys): tcs[i] = np.histogramdd( values, weights=group.values[:, i], bins=bin_edges, )[0] - tcs /= occupancy + tcs /= counts + tcs[np.isnan(tcs)] = 0.0 + tcs[:, occupancy == 0.0] = np.nan return xr.DataArray( tcs, @@ -196,7 +198,7 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= coords={ "unit": keys, **{ - (f"f{feature}" if isinstance(feature, int) else feature): e[:-1] + (f"feature{feature}" if isinstance(feature, int) else feature): e[:-1] + np.diff(e) / 2 for feature, e in zip(features.columns, bin_edges) }, @@ -221,7 +223,7 @@ def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): ) return pd.DataFrame( xarray.values.T, - index=xarray.coords["f0"].values, + index=xarray.coords["feature0"].values, columns=xarray.coords["unit"].values, ) @@ -245,7 +247,7 @@ def compute_1d_tuning_curves_continuous( ) return pd.DataFrame( xarray.values.T, - index=xarray.coords["f0"].values, + index=xarray.coords["feature0"].values, columns=xarray.coords["unit"].values, ) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 287de1297..b7ee0a479 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -475,6 +475,7 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): np.testing.assert_almost_equal(tc.values, expected) +@pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( "tsdframe, nb_bins, kwargs, expected", [ diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index f6c0083a1..e0ca887ab 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -1,6 +1,5 @@ """Tests of tuning curves for `pynapple` package.""" -import itertools from contextlib import nullcontext as does_not_raise import numpy as np @@ -22,7 +21,7 @@ def get_features(n, fs=10.0): d=np.stack( [np.arange(0, 100, 1 / fs) % 10 * i for i in range(1, n + 1)], axis=1 ), - columns=[f"f{i}" for i 
in range(n)], + columns=[f"feature{i}" for i in range(n)], ) @@ -159,6 +158,51 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) @pytest.mark.parametrize( "group, features, kwargs, expected", [ + # single rate unit, single feature + ( + get_group(1).count(1.0), + get_features(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple rate units, single feature + ( + get_group(2).count(1.0), + get_features(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, + ), + ), + # multiple rate units, multiple features + ( + get_group(2).count(1.0), + get_features(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), # single unit, single feature ( get_group(1), @@ -166,8 +210,8 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) {}, xr.DataArray( np.full((1, 10), 10.0), - dims=["unit", "f0"], - coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, ), ), # multiple units, single feature @@ -177,8 +221,11 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) {}, xr.DataArray( np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), - dims=["unit", "f0"], - coords={"unit": [1, 2], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, 
), ), # multiple units, multiple features @@ -193,26 +240,41 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) np.where(np.eye(10), 1.0, np.nan), ] ), - dims=["unit", "f0", "f1"], + dims=["unit", "feature0", "feature1"], coords={ "unit": [1, 2], - "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, }, ), ), - # single unit, single feature, specified bins + # single unit, single feature, specified number of bins ( get_group(1), get_features(1), {"bins": 5}, xr.DataArray( np.full((1, 5), 10.0), - dims=["unit", "f0"], - coords={"unit": [1], "f0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, ), ), - # single unit, multiple features, specified bins + # single unit, multiple features, specified number of bins + ( + get_group(1), + get_features(2), + {"bins": 5}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, + }, + ), + ), + # single unit, multiple features, specified number of bins per feature ( get_group(1), get_features(2), @@ -229,11 +291,37 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) ] ] ), - dims=["unit", "f0", "f1"], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, + }, + ), + ), + # single unit, single feature, specified bins + ( + get_group(1), + get_features(1), + {"bins": [np.linspace(0, 10, 6)]}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, + ), + ), + # single unit, multiple 
features, specified bins + ( + get_group(1), + get_features(2), + {"bins": [np.linspace(0, 10, 6), np.linspace(0, 20, 6)]}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], coords={ "unit": [1], - "f0": np.linspace(0, 9.9, 6)[:-1] + 0.99, - "f1": np.linspace(0, 19.8, 5)[:-1] + 2.475, + "feature0": np.arange(1, 11, 2), + "feature1": np.arange(2, 22, 4), }, ), ), @@ -244,8 +332,97 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) {"range": [(0, 5)]}, xr.DataArray( np.full((1, 10), 10.0), - dims=["unit", "f0"], - coords={"unit": [1], "f0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature + ( + get_group(1), + get_features(2), + {"range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified range and number of bins + ( + get_group(1), + get_features(1), + {"bins": 10, "range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature and number of bins + ( + get_group(1), + get_features(2), + {"bins": 10, "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, multiple features, specified range and number of bins per feature + ( + get_group(1), + get_features(2), + 
{"bins": (10, 10), "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified epochs (smaller) + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet([0.0, 50.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (larger) + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet([0.0, 200.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (multiple) + ( + get_group(1), + get_features(1), + {"epochs": nap.IntervalSet([0.0, 50.0], [20.0, 70.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, ), ), ], From 359f92e0191e61cd5a6396fa83b6868070cdaeb0 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 8 Jul 2025 22:28:47 +0000 Subject: [PATCH 015/244] remove epochs warning as it is not neccessary --- pynapple/process/tuning_curves.py | 6 ------ tests/test_tuning_curves_general.py | 6 +----- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 7df5d45c2..27f44eadc 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -145,12 +145,6 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= if epochs is None: epochs = features.time_support elif isinstance(epochs, nap.IntervalSet): - if features.time_support.tot_length() < epochs.tot_length(): - 
warnings.warn( - "The passed epochs are larger than the time support of the features," - "this will artificially increase the outer bins of the tuning curves.", - UserWarning, - ) features = features.restrict(epochs) else: raise TypeError("epochs should be an IntervalSet.") diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index e0ca887ab..5d4e3d462 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -117,11 +117,7 @@ def get_features(n, fs=10.0): get_group(1), get_features(1), {"epochs": nap.IntervalSet([0.0, 1000.0])}, - pytest.warns( - UserWarning, - match="The passed epochs are larger than the time support of the features," - "this will artificially increase the outer bins of the tuning curves.", - ), + does_not_raise(), ), # fs ( From 3c7c9fe519e4abefccb2e1e195e7f7f2aa4bd456 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 15:03:00 +0000 Subject: [PATCH 016/244] update tutorial_calcium_imaging --- doc/examples/tutorial_calcium_imaging.md | 159 ++++++++--------------- 1 file changed, 53 insertions(+), 106 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index bb594bd8c..014e55d05 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -1,17 +1,18 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- - Calcium Imaging ============ @@ -21,18 +22,12 @@ For the example dataset, we will be working with a recording of a freely-moving The NWB file for the example is hosted on 
[OSF](https://osf.io/sbnaw). We show below how to stream it. - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- -import numpy as pd +```python jupyter={"outputs_hidden": false} import pynapple as nap import matplotlib.pyplot as plt import seaborn as sns -import sys, os -import requests, math +import os +import requests custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) @@ -43,12 +38,7 @@ Downloading the data ------------------ First things first: Let's find our file - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} path = "A0670-221213.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/sbnaw/download", stream=True) @@ -63,24 +53,14 @@ Parsing the data ------------------ Now that we have the file, let's load the data - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- -data = nap.load_file(path) +```python jupyter={"outputs_hidden": false} +data = nap.load_file(path, lazy_loading=False) print(data) ``` Let's save the RoiResponseSeries as a variable called 'transients' and print it - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} transients = data['RoiResponseSeries'] print(transients) ``` @@ -88,15 +68,9 @@ print(transients) *** Plotting the activity of one neuron ----------------------------------- -Our transients are saved as a (35757, 65) TsdFrame. Looking at the printed object, you can see that we have 35757 data points for each of our 65 regions of interest. We want to see which of these are head-direction cells, so we need to plot a tuning curve of fluorescence vs head-direction of the animal. - - +Our transients are saved as a (35757, 65) TsdFrame. Looking at the printed object, you can see that we have 35757 data points for each of our 65 regions of interest (ROIs). 
We want to see which of these are head-direction cells, so we need to plot a tuning curve of fluorescence vs head-direction of the animal. -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} plt.figure(figsize=(6, 2)) plt.plot(transients[0:2000,0], linewidth=5) plt.xlabel("Time (s)") @@ -104,26 +78,16 @@ plt.ylabel("Fluorescence") plt.show() ``` -Here we extract the head-direction as a variable called angle +Here we extract the head-direction as a variable called angle. - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} angle = data['ry'] print(angle) ``` As you can see, we have a longer recording for our tracking of the animal's head than we do for our calcium imaging - something to keep in mind. - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} print(transients.time_support) print(angle.time_support) ``` @@ -131,44 +95,38 @@ print(angle.time_support) *** Calcium tuning curves --------------------- -Here we compute the tuning curves of all the neurons +Here we compute the tuning curves of all the ROIs. +```python jupyter={"outputs_hidden": false} +tcurves = nap.compute_tuning_curves(transients, angle, bins = 120) +tcurves +``` -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- -tcurves = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120) +This yields an `xarray.DataFrame`, which we can beautify by setting feature names and units: -print(tcurves) -``` +```python +def set_metadata(tcurves): + _tcurves=tcurves.rename({"feature0": "Angle", "unit": "ROI"}) + _tcurves.name="Fluorescence" + _tcurves.attrs["units"]="a.u." 
+ _tcurves.coords["Angle"].attrs["units"]="rad" + return _tcurves -We now have a DataFrame, where our index is the angle of the animal's head in radians, and each column represents the tuning curve of each region of interest. We can plot one neuron. +annotated_tcurves = set_metadata(tcurves) +annotated_tcurves +``` +Having set some metadata, we can easily plot one ROI: -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- -plt.figure() -plt.plot(tcurves[4]) -plt.xlabel("Angle") -plt.ylabel("Fluorescence") -plt.show() +```python +annotated_tcurves[4].plot() ``` It looks like this could be a head-direction cell. One important property of head-directions cells however, is that their firing with respect to head-direction is stable. To check for their stability, we can split our recording in two and compute a tuning curve for each half of the recording. We start by finding the midpoint of the recording, using the function [`get_intervals_center`](pynapple.IntervalSet.get_intervals_center). Using this, then create one new IntervalSet with two rows, one for each half of the recording. - -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- +```python jupyter={"outputs_hidden": false} center = transients.time_support.get_intervals_center() halves = nap.IntervalSet( @@ -177,28 +135,17 @@ halves = nap.IntervalSet( ) ``` -Now we can compute the tuning curves for each half of the recording and plot the tuning curves for the fifth region of interest. +Now we can compute the tuning curves for each half of the recording and plot the tuning curves again. 
+```python jupyter={"outputs_hidden": false} +half1 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[0]]) +half2 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[1]]) -```{code-cell} ipython3 ---- -jupyter: - outputs_hidden: false ---- -half1 = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120, ep = halves.loc[[0]]) -half2 = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120, ep = halves.loc[[1]]) - -plt.figure(figsize=(12, 5)) -plt.subplot(1,2,1) -plt.plot(half1[4]) -plt.title("First half") -plt.xlabel("Angle") -plt.ylabel("Fluorescence") -plt.subplot(1,2,2) -plt.plot(half2[4]) -plt.title("Second half") -plt.xlabel("Angle") -plt.show() +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) +set_metadata(half1[4]).plot(ax=ax1) +ax1.set_title("First half") +set_metadata(half2[4]).plot(ax=ax2) +ax2.set_title("Second half") ``` :::{card} From f8c3a0a5e404a3fc1f5c6e25eadc40fb1a476485 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 15:13:55 +0000 Subject: [PATCH 017/244] input_output notebook --- doc/user_guide/02_input_output.md | 2 +- pynapple/process/tuning_curves.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/user_guide/02_input_output.md b/doc/user_guide/02_input_output.md index 695d37a11..af12ed61d 100644 --- a/doc/user_guide/02_input_output.md +++ b/doc/user_guide/02_input_output.md @@ -113,7 +113,7 @@ You can still apply any high level function of pynapple. 
For example here, we co ```{code-cell} ipython3 -tc = nap.compute_1d_tuning_curves(data['units'], data['y'], 10) +tc = nap.compute_tuning_curves(data['units'], data['y'], 10) ``` diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 27f44eadc..570aca6f3 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -188,7 +188,7 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= return xr.DataArray( tcs, - name="tuning curves", + name="Response", coords={ "unit": keys, **{ From 52b7f87fdcb2130a402d68bbe3cc709895f4a6de Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 17:19:03 +0000 Subject: [PATCH 018/244] update HD tutorial for new tuning curve function --- doc/examples/tutorial_HD_dataset.md | 159 ++++++++++++---------------- 1 file changed, 70 insertions(+), 89 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 9d39d2d5c..509b73094 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -1,17 +1,18 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- - Analysing head-direction cells ============ @@ -19,16 +20,15 @@ This tutorial demonstrates how we use Pynapple to generate Figure 4a in the [pub The NWB file for the example is hosted on [OSF](https://osf.io/jb2gd). We show below how to stream it. The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000056). 
- -```{code-cell} ipython3 -import numpy as np +```python +import scipy import pandas as pd +import numpy as np import pynapple as nap -import scipy.ndimage import matplotlib.pyplot as plt import seaborn as sns -import requests, math, os - +import requests, os +import xarray as xr custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) @@ -40,8 +40,7 @@ Downloading the data It's a small NWB file. - -```{code-cell} ipython3 +```python path = "Mouse32-140822.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/jb2gd/download", stream=True) @@ -57,15 +56,13 @@ Parsing the data The first step is to load the data and other relevant variables of interest - -```{code-cell} ipython3 +```python data = nap.load_file(path) # Load the NWB file for this dataset ``` -What does this look like ? +What does this look like? - -```{code-cell} ipython3 +```python print(data) ``` @@ -75,17 +72,15 @@ Head-Direction Tuning Curves To plot Head-Direction Tuning curves, we need the spike timings and the orientation of the animal. These quantities are stored in the variables 'units' and 'ry'. - -```{code-cell} ipython3 +```python spikes = data["units"] # Get spike timings epochs = data["epochs"] # Get the behavioural epochs (in this case, sleep and wakefulness) angle = data["ry"] # Get the tracked orientation of the animal ``` -What does this look like ? - +What does this look like? -```{code-cell} ipython3 +```python print(spikes) ``` @@ -93,15 +88,13 @@ Here, rate is the mean firing rate of the unit. Location indicates the brain reg This dataset contains units recorded from the anterior thalamus. Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). Units were also recorded from nearby thalamic nuclei in this animal. For the purposes of our tutorial, we are interested in the units recorded in ADn. 
We can restrict ourselves to analysis of these units rather easily, using Pynapple. - -```{code-cell} ipython3 +```python spikes_adn = spikes.getby_category("location")["adn"] # Select only those units that are in ADn ``` -What does this look like ? - +What does this look like? -```{code-cell} ipython3 +```python print(spikes_adn) ``` @@ -109,72 +102,60 @@ Let's compute some head-direction tuning curves. To do this in Pynapple, all you Plot firing rate of ADn units as a function of heading direction, i.e. a head-direction tuning curve - -```{code-cell} ipython3 -tuning_curves = nap.compute_1d_tuning_curves( +```python +tuning_curves = nap.compute_tuning_curves( group=spikes_adn, - feature=angle, - nb_bins=61, - ep=epochs[epochs.tags == "wake"], - minmax=(0, 2 * np.pi) + features=angle, + bins=61, + epochs=epochs[epochs.tags == "wake"], + range=[(0, 2 * np.pi)] ) ``` -What does this look like ? - +What does this look like? -```{code-cell} ipython3 +```python tuning_curves ``` -Each row indicates an angular bin (in radians), and each column corresponds to a single unit. Let's compute the preferred angle quickly as follows: +It is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. +Let's compute the preferred angle quickly as follows: - -```{code-cell} ipython3 -pref_ang = tuning_curves.idxmax() +```python +pref_ang = tuning_curves.idxmax(dim="feature0") ``` For easier visualization, we will colour our plots according to the preferred angle of the cell. To do so, we will normalize the range of angles we have, over a colourmap. 
- -```{code-cell} ipython3 +```python norm = plt.Normalize() # Normalizes data into the range [0,1] -color = plt.cm.hsv(norm([i / (2 * np.pi) for i in pref_ang.values])) # Assigns a colour in the HSV colourmap for each value of preferred angle -color = pd.DataFrame(index=pref_ang.index, data = color, columns = ['r', 'g', 'b', 'a']) +color = plt.cm.hsv(norm([i / (2 * np.pi) for i in pref_ang])) # Assigns a colour in the HSV colourmap for each value of preferred angle +color = xr.DataArray( + color, + dims=("unit", "color"), + coords={"unit": pref_ang.unit} +) ``` -To make the tuning curves look nice, we will smooth them before plotting, using this custom function: +To make the tuning curves look nice, we will smooth them before plotting: - -```{code-cell} ipython3 +```python from scipy.ndimage import gaussian_filter1d -def smoothAngularTuningCurves(tuning_curves, sigma=2): - - tmp = np.concatenate((tuning_curves.values, tuning_curves.values, tuning_curves.values)) - tmp = gaussian_filter1d(tmp, sigma=sigma, axis=0) - - return pd.DataFrame(index = tuning_curves.index, - data = tmp[tuning_curves.shape[0]:tuning_curves.shape[0]*2], - columns = tuning_curves.columns - ) -``` - -Therefore, we have: - -```{code-cell} ipython3 -smoothcurves = smoothAngularTuningCurves(tuning_curves, sigma=3) +tmp = np.concatenate((tuning_curves.values, tuning_curves.values, tuning_curves.values), axis=1) +tmp = gaussian_filter1d(tmp, sigma=3, axis=1) +tuning_curves.values = tmp[:, tuning_curves.shape[1]:2*tuning_curves.shape[1]] ``` What does this look like? Let's plot the tuning curves! 
- -```{code-cell} ipython3 +```python +sorted_tuning_curves = tuning_curves.sortby(pref_ang) plt.figure(figsize=(12, 9)) -for i, n in enumerate(pref_ang.sort_values().index.values): +for i, n in enumerate(tuning_curves.unit.values): plt.subplot(8, 4, i + 1, projection='polar') # Plot the curves in 8 rows and 4 columns plt.plot( - smoothcurves[n], color=color.loc[n] + sorted_tuning_curves.coords["feature0"], sorted_tuning_curves.sel(unit=n).values, color=color.sel(unit=n).values ) # Colour of the curves determined by preferred angle plt.xticks([]) plt.show() @@ -183,8 +164,6 @@ plt.show() Awesome! -+++ - *** Decoding ------------------ @@ -193,10 +172,14 @@ Now that we have HD tuning curves, we can go one step further. Using only the po To decode the population activity, we will be using a Bayesian Decoder as implemented in Pynapple. Just a single line of code! +```python +print(tuning_curves.to_pandas().T) +print(spikes_adn) +``` -```{code-cell} ipython3 +```python decoded, proba_feature = nap.decode_1d( - tuning_curves=tuning_curves, + tuning_curves=tuning_curves.to_pandas().T, group=spikes_adn, ep=epochs[epochs.tags == "wake"], bin_size=0.1, # second @@ -206,15 +189,13 @@ decoded, proba_feature = nap.decode_1d( What does this look like ? - -```{code-cell} ipython3 +```python print(decoded) ``` The variable 'decoded' indicates the most probable angle in which the animal was looking. There is another variable, 'proba_feature' that denotes the probability of a given angular bin at a given time point. We can look at it below: - -```{code-cell} ipython3 +```python print(proba_feature) ``` @@ -222,17 +203,16 @@ Each row is a time bin, and each column is an angular bin. The sum of all values Now, let's plot the raster plot for a given period of time, and overlay the actual and decoded HD on the population activity. 
- -```{code-cell} ipython3 +```python ep = nap.IntervalSet( start=10717, end=10730 ) # Select an arbitrary interval for plotting -plt.figure() +plt.subplots(figsize=(12, 6)) plt.rc("font", size=12) for i, n in enumerate(spikes_adn.keys()): plt.plot( - spikes[n].restrict(ep).fillna(pref_ang[n]), "|", color=color.loc[n] + spikes[n].restrict(ep).fillna(pref_ang.sel(unit=n).item()), "|", color=color.sel(unit=n).values ) # raster plot for each cell plt.plot( decoded.restrict(ep), "--", color="grey", linewidth=2, label="decoded HD" @@ -247,8 +227,7 @@ From this plot, we can see that the decoder is able to estimate the head-directi What does the probability distribution in this example event look like? Ideally, the bins with the highest probability will correspond to the bins having the most spikes. Let's plot the probability matrix to visualize this. - -```{code-cell} ipython3 +```python smoothed = scipy.ndimage.gaussian_filter( proba_feature, 1 ) # Smoothening the probability distribution @@ -284,6 +263,7 @@ plt.ylabel("Angle (rad)") # Y-axis is the angle in radian plt.colorbar(label="probability") ``` + From this probability distribution, we observe that the decoded HD very closely matches the actual HD. Therefore, the population activity in ADn is a reliable estimate of the heading direction of the animal. I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team! 
@@ -296,4 +276,5 @@ Dhruv Mehrotra Guillaume Viejo -::: \ No newline at end of file +::: + From 4bdf81a4a8b45a532ab71db3e91ee5ba7c484e5c Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 18:24:24 +0000 Subject: [PATCH 019/244] more doc updates for tuning curves --- README.md | 22 ++++++------ doc/user_guide/01_introduction_to_pynapple.md | 4 --- pynapple/process/tuning_curves.py | 1 - tests/test_metadata.py | 36 ++++++++++--------- tests/test_tuning_curves_general.py | 9 ++--- 5 files changed, 34 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index d6486bc02..87e56fdf1 100644 --- a/README.md +++ b/README.md @@ -123,25 +123,27 @@ import numpy as np import pynapple as nap # LOADING DATA FROM NWB -data = nap.load_file("A2929-200711.nwb") +data = nap.load_file("doc/user_guide/A2929-200711.nwb") spikes = data["units"] head_direction = data["ry"] wake_ep = data["position_time_support"] # COMPUTING TUNING CURVES -tuning_curves = nap.compute_1d_tuning_curves( - spikes, head_direction, 120, ep=wake_ep, minmax=(0, 2 * np.pi) +tuning_curves = nap.compute_tuning_curves( + spikes, head_direction, 120, epochs=wake_ep, range=[(0, 2 * np.pi)] ) - # PLOT -plt.figure() -for i in spikes: - plt.subplot(3, 5, i + 1, projection="polar") - plt.plot(tuning_curves[i]) - plt.xticks([0, np.pi / 2, np.pi, 3 * np.pi / 2]) - +g=tuning_curves.plot( + row="unit", + col_wrap=5, + subplot_kws={"projection": "polar"}, + sharey=False +) +plt.xticks([0, np.pi / 2, np.pi, 3 * np.pi / 2]) +g.set_titles("") +g.set_xlabels("") plt.show() ``` Shown below, the final figure from the example code displays the firing rate of 15 neurons as a function of the direction of the head of the animal in the horizontal plane. 
diff --git a/doc/user_guide/01_introduction_to_pynapple.md b/doc/user_guide/01_introduction_to_pynapple.md index 7140859ea..d3e9bdc8f 100644 --- a/doc/user_guide/01_introduction_to_pynapple.md +++ b/doc/user_guide/01_introduction_to_pynapple.md @@ -427,10 +427,6 @@ Overview of advanced analysis The `process` module of pynapple contains submodules that group methods that can be applied for high level analysis. All of the method are directly available from the `nap` namespace. -:::{important} -Some functions have been doubled given the nature of the data. For instance, computing a 1d tuning curves from spiking activity requires the [`nap.compute_1d_tuning_curves`](pynapple.process.tuning_curves.compute_1d_tuning_curves). The same function for calcium imaging data which is a continuous time series is available with [`nap.compute_1d_tuning_curves_continuous`](pynapple.process.tuning_curves.compute_1d_tuning_curves_continuous). -::: - **[Discrete correlograms & ISI](05_correlograms_isi)** This module analyses discrete events, specifically correlograms (for example by computing the cross-correlograms of a population of neurons) and interspike interval (ISI) distributions. 
diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 570aca6f3..79cf8a963 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -188,7 +188,6 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= return xr.DataArray( tcs, - name="Response", coords={ "unit": keys, **{ diff --git a/tests/test_metadata.py b/tests/test_metadata.py index b5c95a85c..1ac59f08c 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd import pytest +import xarray import pynapple as nap from pynapple.core.metadata_class import _Metadata @@ -1786,18 +1787,18 @@ def test_metadata_groupby_apply_func_kwargs( "func, ep, err", [ ( # input_key is not string - nap.compute_1d_tuning_curves, + nap.compute_tuning_curves, 1, pytest.raises(TypeError, match="input_key must be a string"), ), ( # input_key does not exist in function - nap.compute_1d_tuning_curves, + nap.compute_tuning_curves, "epp", pytest.raises(KeyError, match="does not have input parameter"), ), ( # function missing required inputs, or incorrect input type - nap.compute_1d_tuning_curves, - "ep", + nap.compute_tuning_curves, + "epochs", pytest.raises(TypeError), ), ], @@ -2375,7 +2376,7 @@ def tsdframe_gba(self): def test_metadata_groupby_apply_tuning_curves(self, tsgroup_gba, iset_gba): """ - Test for groupby_apply with nap.compute_1d_tuning_curves when: + Test for groupby_apply with nap.compute_tuning_curves when: 1. a TsGroup is grouped 2. an IntervalSet is grouped and makes sure the outputs are different. 
@@ -2385,30 +2386,31 @@ def test_metadata_groupby_apply_tuning_curves(self, tsgroup_gba, iset_gba): # apply to intervalset out = iset_gba.groupby_apply( "label", - nap.compute_1d_tuning_curves, - "ep", + nap.compute_tuning_curves, + "epochs", group=tsgroup_gba, - feature=feature, - nb_bins=5, + features=feature, + bins=5, ) for grp, idx in iset_gba.groupby("label").items(): - tmp = nap.compute_1d_tuning_curves( - tsgroup_gba, feature, nb_bins=5, ep=iset_gba[idx] + tmp = nap.compute_tuning_curves( + tsgroup_gba, feature, bins=5, epochs=iset_gba[idx] ) - pd.testing.assert_frame_equal(out[grp], tmp) + xarray.testing.assert_identical(out[grp], tmp) # apply to tsgroup out2 = tsgroup_gba.groupby_apply( "label", - nap.compute_1d_tuning_curves, - feature=feature, - nb_bins=5, + nap.compute_tuning_curves, + features=feature, + bins=5, ) + # make sure groups are different assert out2.keys() != out.keys() for grp, idx in tsgroup_gba.groupby("label").items(): - tmp = nap.compute_1d_tuning_curves(tsgroup_gba[idx], feature, nb_bins=5) - pd.testing.assert_frame_equal(out2[grp], tmp) + tmp = nap.compute_tuning_curves(tsgroup_gba[idx], feature, bins=5) + xarray.testing.assert_identical(out2[grp], tmp) def test_metadata_groupby_apply_tsgroup_lambda(self, tsgroup_gba): """ diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index 5d4e3d462..cac6832ce 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -424,9 +424,6 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) ], ) def test_compute_tuning_curves(group, features, kwargs, expected): - tcs = nap.compute_tuning_curves(group, features, **kwargs) - assert isinstance(tcs, xr.DataArray) - for dim in expected.coords.keys(): - assert dim in tcs.coords - np.testing.assert_allclose(tcs.coords[dim].values, expected.coords[dim].values) - np.testing.assert_allclose(tcs, expected) + xr.testing.assert_allclose( + 
nap.compute_tuning_curves(group, features, **kwargs), expected + ) From cfeabacfcfd3e4753c611a689832b350b7c0d123 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 18:26:31 +0000 Subject: [PATCH 020/244] fix minimal working example path --- README.md | 2 +- main.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 87e56fdf1..6771ead2e 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ import numpy as np import pynapple as nap # LOADING DATA FROM NWB -data = nap.load_file("doc/user_guide/A2929-200711.nwb") +data = nap.load_file("A2929-200711.nwb") spikes = data["units"] head_direction = data["ry"] diff --git a/main.py b/main.py index 62118ab79..9347539ab 100644 --- a/main.py +++ b/main.py @@ -16,16 +16,15 @@ wake_ep = data["position_time_support"] # COMPUTING TUNING CURVES -tuning_curves = nap.compute_1d_tuning_curves( - spikes, head_direction, 120, minmax=(0, 2 * np.pi) +tuning_curves = nap.compute_tuning_curves( + spikes, head_direction, 120, epochs=wake_ep, range=[(0, 2 * np.pi)] ) - # PLOT -plt.figure() -for i in spikes: - plt.subplot(3, 5, i + 1, projection="polar") - plt.plot(tuning_curves[i]) - plt.xticks([0, np.pi / 2, np.pi, 3 * np.pi / 2]) - +g = tuning_curves.plot( + row="unit", col_wrap=5, subplot_kws={"projection": "polar"}, sharey=False +) +plt.xticks([0, np.pi / 2, np.pi, 3 * np.pi / 2]) +g.set_titles("") +g.set_xlabels("") plt.show() From d67b2c29b6cb2f114c4db9e2f0bdebcfbf81f0f8 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 18:29:11 +0000 Subject: [PATCH 021/244] cleaner to pandas for old 1d function --- pynapple/process/tuning_curves.py | 42 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 79cf8a963..bb954554b 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -207,17 +207,16 @@ def 
compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): DeprecationWarning, stacklevel=2, ) - xarray = compute_tuning_curves( - group, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - return pd.DataFrame( - xarray.values.T, - index=xarray.coords["feature0"].values, - columns=xarray.coords["unit"].values, + return ( + compute_tuning_curves( + group, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T ) @@ -231,17 +230,16 @@ def compute_1d_tuning_curves_continuous( DeprecationWarning, stacklevel=2, ) - xarray = compute_tuning_curves( - tsdframe, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - return pd.DataFrame( - xarray.values.T, - index=xarray.coords["feature0"].values, - columns=xarray.coords["unit"].values, + return ( + compute_tuning_curves( + tsdframe, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T ) From 16971bea88f5f04ec517c9a221f6b62add8a57fb Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 18:57:00 +0000 Subject: [PATCH 022/244] more doc updates --- doc/user_guide/06_tuning_curves.md | 178 +++++++++++++---------------- doc/user_guide/07_decoding.md | 8 +- tests/test_decoding.py | 14 ++- 3 files changed, 92 insertions(+), 108 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 3b86e235c..3a0edd84e 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -1,14 +1,16 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: 
pynapple + language: python + name: python3 --- # Tuning curves @@ -19,13 +21,7 @@ and 2-dimensional tuning curves (for example firing rate as a function of position). It can also compute average firing rate for different epochs (for example firing rate for different epochs of stimulus presentation). -:::{important} -If you are using calcium imaging data with the activity of the cell as a continuous transient, the function to call ends with `_continuous` for continuous time series (e.g. [`compute_1d_tuning_curves_continuous`](pynapple.process.tuning_curves.compute_1d_tuning_curves_continuous)). -::: - - -```{code-cell} ipython3 -:tags: [hide-cell] +```python tags=["hide-cell"] import pynapple as nap import numpy as np import matplotlib.pyplot as plt @@ -35,9 +31,7 @@ custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) ``` -```{code-cell} -:tags: [hide-cell] - +```python tags=["hide-cell"] group = { 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))), 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))), @@ -47,11 +41,14 @@ group = { tsgroup = nap.TsGroup(group) ``` + ## From epochs -The epochs should be stored in a dictionnary: -```{code-cell} ipython3 +The epochs should be stored in a dictionnary: + + +```python dict_ep = { "stim0": nap.IntervalSet(start=0, end=20), "stim1":nap.IntervalSet(start=30, end=70) @@ -62,21 +59,17 @@ dict_ep = { The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet` type. The value is the mean firing rate of the neuron during this set of intervals. 
-```{code-cell} ipython3 +```python mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) pprint(mean_fr) ``` - ## From timestamps activity -### 1-dimension tuning curves - - -```{code-cell} ipython3 -:tags: [hide-cell] +### 1-dimensional tuning curves +```python tags=["hide-cell"] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -105,38 +98,36 @@ tsgroup = nap.TsGroup( ``` Mandatory arguments are `TsGroup`, `Tsd` (or `TsdFrame` with 1 column only) -and `nb_bins` for number of bins of the tuning curves. +and `bins` for number of bins of the tuning curves. -If an `IntervalSet` is passed with `ep`, everything is restricted to `ep` +If an `IntervalSet` is passed with `epochs`, everything is restricted to `epochs` otherwise the time support of the feature is used. -The min and max of the tuning curve is by default the min and max of the feature. This can be tweaked with the argument `minmax`. +The min and max of the tuning curve is by default the min and max of the feature. This can be tweaked with the argument `range`. -The output is a pandas DataFrame. Each column is a unit from `TsGroup` argument. The index of the DataFrame carries the center of the bin in feature space. +The output is an `xarray.DataArray` with a unit and feature dimension. -```{code-cell} ipython3 -tuning_curve = nap.compute_1d_tuning_curves( +```python +tuning_curves_1d = nap.compute_tuning_curves( group=tsgroup, - feature=feature, - nb_bins=120, - minmax=(0, 2*np.pi) + features=feature, + bins=120, + range=[(0, 2*np.pi)] ) - -print(tuning_curve) +tuning_curves_1d ``` -```{code-cell} ipython3 -plt.figure() -plt.plot(tuning_curve) +```python +tuning_curves_1d.plot.line(x="feature0", add_legend=False) plt.xlabel("Feature space") plt.ylabel("Firing rate (Hz)") plt.show() ``` -Internally, the function is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. 
It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. +Internally, the function is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. +It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. -```{code-cell} ipython3 -:tags: [hide-input] +```python tags=["hide-input"] plt.figure() plt.subplot(121) plt.plot(tsgroup[3].value_from(feature), 'o') @@ -145,7 +136,7 @@ plt.ylabel("Feature") plt.xlim(0, 2) plt.xlabel("Time (s)") plt.subplot(122) -plt.plot(tuning_curve[3].values, tuning_curve[3].index.values, label="Tuning curve (unit=3)") +plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["feature0"], label="Tuning curve (unit=3)") plt.xlabel("Firing rate (Hz)") plt.legend() plt.show() @@ -153,8 +144,7 @@ plt.show() ### 2-dimensional tuning curves -```{code-cell} ipython3 -:tags: [hide-cell] +```python tags=["hide-cell"] dt = 0.01 T = 10 epoch = nap.IntervalSet(start=0, end=T, time_units="s") @@ -173,33 +163,34 @@ tsgroup = nap.TsGroup({ }, time_support=epoch) ``` -The `group` argument must be a `TsGroup` object. -The `features` argument must be a 2-columns `TsdFrame` object. -`nb_bins` can be an int or a tuple of 2 ints. - -```{code-cell} ipython3 -tcurves2d, binsxy = nap.compute_2d_tuning_curves( +The `group` argument must be a `TsGroup` object. +The `features` argument must be a 2-columns `TsdFrame` object. +`bins` can be an int or a tuple of 2 ints. +`range` can be a list of two `(min, max)` tuples. + +```python +tuning_curves_2d = nap.compute_tuning_curves( group=tsgroup, features=features, - nb_bins=(5,5), - minmax=(-1, 1, -1, 1) + bins=(5,5), + range=[(-1, 1), (-1, 1)] ) -pprint(tcurves2d) +tuning_curves_2d ``` -`tcurves2d` is a dictionnary with each key a unit in `TsGroup`. 
`binsxy` is a numpy array representing the centers of the bins and is useful for plotting tuning curves. Bins that have never been visited by the feature have been assigned a NaN value. +`tuning_curve_2d` is an `xarray.DataArray` with three dimensions: one for the units of `TsGroup` and 2 for the features, the coordinates contain the centers of the bins. +Bins that have never been visited by the feature have been assigned a NaN value. Checking the accuracy of the tuning curves can be bone by displaying the spikes aligned to the features with the function `value_from` which assign to each spikes the corresponding features value for unit 0. -```{code-cell} ipython3 +```python ts_to_features = tsgroup[0].value_from(features) print(ts_to_features) ``` -`tsgroup[0]` which is a `Ts` object has been transformed to a `TsdFrame` object with each timestamps (spike times) being associated with a features value. -```{code-cell} ipython3 -:tags: [hide-input] +`tsgroup[0]` which is a `Ts` object has been transformed to a `TsdFrame` object with each timestamps (spike times) being associated with a features value. +```python tags=["hide-input"] plt.figure() plt.subplot(121) @@ -216,7 +207,7 @@ extents = ( np.min(features["b"]), np.max(features["b"]), ) -plt.imshow(tcurves2d[0], +plt.imshow(tuning_curves_2d[0], origin="lower", extent=extents, cmap="viridis", aspect='auto' ) @@ -229,17 +220,13 @@ plt.tight_layout() plt.show() ``` - ## From continuous activity -Tuning curves compute with the following functions are usually made with -data from calcium imaging activities. +Tuning curves computed in the following matter are usually made with data from calcium imaging activities. 
### 1-dimensional tuning curves -```{code-cell} ipython3 -:tags: [hide-cell] - +```python tags=["hide-cell"] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -267,34 +254,28 @@ tsdframe = nap.TsdFrame( ) ``` -Arguments are `TsdFrame` (for example continuous calcium data), `Tsd` or `TsdFrame` for the 1-d feature and `nb_bins` for the number of bins. +The same function `nap.compute_tuning_curves` can also take a `TsdFrame` (for example continuous calcium data) as input. -```{code-cell} ipython3 - -tuning_curves = nap.compute_1d_tuning_curves_continuous( - tsdframe=tsdframe, - feature=feature, - nb_bins=120, - minmax=(0, 2*np.pi) +```python +tuning_curves_1d = nap.compute_tuning_curves( + group=tsdframe, + features=feature, + bins=120, + range=[(0, 2*np.pi)] ) - -print(tuning_curves) +tuning_curves_1d ``` -```{code-cell} ipython3 -plt.figure() -plt.plot(tuning_curves) +```python +tuning_curves_1d.plot.line(x="feature0", add_legend=False) plt.xlabel("Feature space") -plt.ylabel("Mean activity") +plt.ylabel("Firing rate (Hz)") plt.show() ``` ### 2-dimensional tuning curves - -```{code-cell} ipython3 -:tags: [hide-cell] - +```python tags=["hide-cell"] dt = 0.01 T = 10 epoch = nap.IntervalSet(start=0, end=T, time_units="s") @@ -315,20 +296,16 @@ tsdframe = nap.TsdFrame( ) ``` -Arguments are `TsdFrame` (for example continuous calcium data), `Tsd` or `TsdFrame` for the 1-d feature and `nb_bins` for the number of bins. 
- -```{code-cell} ipython3 - -tuning_curves, xy = nap.compute_2d_tuning_curves_continuous( - tsdframe=tsdframe, +```python +tuning_curves_2d = nap.compute_tuning_curves( + group=tsdframe, features=features, - nb_bins=5, + bins=5, ) - -print(tuning_curves) +tuning_curves_2d ``` -```{code-cell} ipython3 +```python plt.figure() plt.subplot(121) plt.plot(features["b"], features["a"], label="features") @@ -343,7 +320,7 @@ extents = ( np.min(features["b"]), np.max(features["b"]), ) -plt.imshow(tuning_curves[0], +plt.imshow(tuning_curves_2d[0], origin="lower", extent=extents, cmap="viridis", aspect='auto' ) @@ -356,5 +333,6 @@ plt.tight_layout() plt.show() ``` +```python - +``` diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 310ba660f..3d6efae5b 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -66,9 +66,9 @@ epoch = nap.IntervalSet(0, 10) To decode, we need to compute tuning curves in 1D. ```{code-cell} ipython3 -tcurves_1d = nap.compute_1d_tuning_curves( - tsgroup, feature, nb_bins=61, minmax=(0, 2 * np.pi) -) +tcurves_1d = nap.compute_tuning_curves( + tsgroup, feature, bins=61, range=[(0, 2 * np.pi)] +).to_pandas().T ``` We can display the tuning curves of each neurons @@ -218,4 +218,4 @@ plt.title("Feature b") plt.legend() plt.tight_layout() plt.show() -``` \ No newline at end of file +``` diff --git a/tests/test_decoding.py b/tests/test_decoding.py index c5f95802c..05d0ed08b 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -17,8 +17,12 @@ def get_testing_set_1d(): feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) - tc = nap.compute_1d_tuning_curves( - group=group, feature=feature, nb_bins=2, minmax=(-0.5, 1.5) + tc = ( + nap.compute_tuning_curves( + group=group, features=feature, bins=2, range=[(-0.5, 1.5)] + ) + .to_pandas() + .T ) ep = nap.IntervalSet(start=0, end=100) return 
feature, group, tc, ep @@ -131,9 +135,11 @@ def get_testing_set_2d(): } ) - tc, xy = nap.compute_2d_tuning_curves( - group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) + tc = nap.compute_tuning_curves( + group=group, features=features, bins=2, range=[(-0.5, 1.5), (-0.5, 1.5)] ) + xy = [tc.coords[dim].values for dim in tc.coords if dim != "unit"] + tc = {c: tc.sel(unit=c).values for c in tc.coords["unit"].values} ep = nap.IntervalSet(start=0, end=100) return features, group, tc, ep, tuple(xy) From 39a9f62e9bc1c9d04991048d76a65563e7ffd6aa Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 19:26:10 +0000 Subject: [PATCH 023/244] decoding notebook --- doc/user_guide/07_decoding.md | 104 +++++++++++++++------------------- 1 file changed, 45 insertions(+), 59 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 3d6efae5b..da7bcc8e9 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -1,20 +1,21 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- # Decoding -```{code-cell} ipython3 -:tags: [hide-cell] +```python tags=["hide-cell"] import pynapple as nap import numpy as np import pandas as pd @@ -23,18 +24,19 @@ import seaborn as sns custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) ``` + + Pynapple supports 1 dimensional and 2 dimensional bayesian decoding. The function returns the decoded feature as well as the probabilities for each timestamps. 
:::{hint} -Input to the bayesian decoding functions always include the tuning curves computed from [`nap.compute_1d_tuning_curves`](pynapple.process.tuning_curves.compute_1d_tuning_curves) or [`nap.compute_2d_tuning_curves`](pynapple.process.tuning_curves.compute_2d_tuning_curves). +Input to the bayesian decoding functions always include the tuning curves computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). ::: ## 1-dimensional decoding + -```{code-cell} ipython3 -:tags: [hide-cell] - +```python tags=["hide-cell"] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -65,19 +67,16 @@ epoch = nap.IntervalSet(0, 10) To decode, we need to compute tuning curves in 1D. -```{code-cell} ipython3 -tcurves_1d = nap.compute_tuning_curves( +```python +tuning_curves_1d = nap.compute_tuning_curves( tsgroup, feature, bins=61, range=[(0, 2 * np.pi)] -).to_pandas().T +) ``` We can display the tuning curves of each neurons -```{code-cell} -:tags: [hide-input] - -plt.figure() -plt.plot(tcurves_1d) +```python tags=["hide-input"] +tuning_curves_1d.plot.line(x="feature0", add_legend=False) plt.xlabel("Feature position") plt.ylabel("Rate (Hz)") plt.show() @@ -85,10 +84,9 @@ plt.show() `nap.decode_1d` performs bayesian decoding: - -```{code-cell} ipython3 +```python decoded, proba_feature = nap.decode_1d( - tuning_curves=tcurves_1d , # 2D tuning curves + tuning_curves=tcurves_1d.to_pandas().T, # 1D tuning curves group=tsgroup, # Spiking activity ep=epoch, # Small epoch bin_size=0.06, # How to bin the spike trains @@ -98,8 +96,7 @@ decoded, proba_feature = nap.decode_1d( `decoded` is `Tsd` object containing the decoded feature value. `proba_feature` is a `TsdFrame` containing the probabilities of being in a particular feature bin over time. 
-```{code-cell} ipython3 -:tags: [hide-input] +```python tags=["hide-input"] plt.figure(figsize=(12, 6)) plt.subplot(211) plt.plot(feature.restrict(epoch), label="True") @@ -111,17 +108,13 @@ plt.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis" plt.xticks([0, len(decoded)], epoch.values[0]) plt.xlabel("Time (s)") plt.show() - ``` - ## 2-dimensional decoding -```{code-cell} ipython3 -:tags: [hide-cell] - +```python tags=["hide-cell"] dt = 0.1 -epoch = nap.IntervalSet(start=0, end=1000, time_units="s") +epochs = nap.IntervalSet(start=0, end=1000, time_units="s") features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T features = nap.TsdFrame(t=np.arange(0, 1000, dt), d=features, @@ -141,54 +134,43 @@ for i in range(12): ts_group[i] = nap.Ts(ts, time_units="us") ts_group = nap.TsGroup(ts_group, time_support=epoch) - ``` To decode, we need to compute tuning curves in 2D. - -```{code-cell} ipython3 -tcurves2d, binsxy = nap.compute_2d_tuning_curves( +```python +tuning_curves_2d = nap.compute_tuning_curves( group=ts_group, # Spiking activity of 12 neurons features=features, # 2-dimensional features - nb_bins=10, - ep=epoch, - minmax=(-1.0, 1.0, -1.0, 1.0), # Minmax of the features + bins=10, + epochs=epochs, + range=[(-1.0, 1.0), (-1.0, 1.0)], # Minmax of the features ) ``` We can display the tuning curves of each neuron -```{code-cell} -:tags: [hide-input] - -plt.figure(figsize=(20, 9)) -for i in ts_group.keys(): - plt.subplot(2, 6, i + 1) - plt.imshow( - tcurves2d[i], extent=(binsxy[1][0], binsxy[1][-1], binsxy[0][0], binsxy[0][-1]) - ) - plt.xticks() -plt.show() +```python +tuning_curves_2d.plot(row="unit", col_wrap=6) ``` `nap.decode_2d` performs bayesian decoding: +```python +tcs = {c: tuning_curves_2d.sel(unit=c).values for c in tuning_curves_2d.coords["unit"].values} +bins = [tuning_curves_2d.coords[dim].values for dim in tuning_curves_2d.coords if dim != "unit"] -```{code-cell} ipython3 decoded, 
proba_feature = nap.decode_2d( - tuning_curves=tcurves2d, # 2D tuning curves + tuning_curves=tcs, # 2D tuning curves group=ts_group, # Spiking activity ep=epoch, # Epoch bin_size=0.1, # How to bin the spike trains - xy=binsxy, # Features position + xy=bins, # Features position features=features, # Features ) ``` -```{code-cell} ipython3 -:tags: [hide-input] - +```python tags=["hide-input"] plt.figure(figsize=(15, 5)) plt.subplot(131) plt.plot(features["a"].get(0,20), label="True") @@ -219,3 +201,7 @@ plt.legend() plt.tight_layout() plt.show() ``` + +```python + +``` From 13fbae3a9174c515d988291b736393c0cae82e51 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 19:38:43 +0000 Subject: [PATCH 024/244] tutorial phase pref doc --- doc/examples/tutorial_phase_preferences.md | 95 +++++++++------------- 1 file changed, 38 insertions(+), 57 deletions(-) diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index 35e9e0599..e42af88e7 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -1,17 +1,18 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- - Spikes-phase coupling ===================== @@ -20,8 +21,7 @@ with spiking data, to find phase preferences of spiking units. Specifically, we will examine LFP and spiking data from a period of REM sleep, after traversal of a linear track. 
- -```{code-cell} ipython3 +```python import math import os @@ -43,8 +43,7 @@ Downloading the data ------------------ Let's download the data and save it locally - -```{code-cell} ipython3 +```python path = "Achilles_10252013_EEG.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/2dfvp/download", stream=True) @@ -59,8 +58,7 @@ Loading the data ------------------ Let's load and print the full dataset. - -```{code-cell} ipython3 +```python data = nap.load_file(path) FS = 1250 # We know from the methods of the paper print(data) @@ -71,8 +69,7 @@ Selecting slices ----------------------------------- For later visualization, we define an interval of 3 seconds of data during REM sleep. - -```{code-cell} ipython3 +```python ep_ex_rem = nap.IntervalSet( data["rem"]["start"][0] + 97.0, data["rem"]["start"][0] + 100.0, @@ -81,8 +78,7 @@ ep_ex_rem = nap.IntervalSet( Here we restrict the lfp to the REM epochs. - -```{code-cell} ipython3 +```python tsd_rem = data["eeg"][:,0].restrict(data["rem"]) # We will also extract spike times from all units in our dataset @@ -95,8 +91,7 @@ Plotting the LFP Activity ----------------------------------- We should first plot our REM Local Field Potential data. - -```{code-cell} ipython3 +```python fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 3)) ax.plot(tsd_rem.restrict(ep_ex_rem)) ax.set_title("REM Local Field Potential") @@ -114,23 +109,20 @@ frequencies present in the data. We must define the frequency set that we'd like to use for our decomposition. - -```{code-cell} ipython3 +```python freqs = np.geomspace(5, 200, 25) ``` We compute the wavelet transform on our LFP data (only during the example interval). - -```{code-cell} ipython3 +```python cwt_rem = nap.compute_wavelet_transform(tsd_rem.restrict(ep_ex_rem), fs=FS, freqs=freqs) ``` *** Now let's plot the calculated wavelet scalogram. 
- -```{code-cell} ipython3 +```python # Define wavelet decomposition plotting function def plot_timefrequency(freqs, powers, ax=None): im = ax.imshow(np.abs(powers), aspect="auto") @@ -163,15 +155,13 @@ Filtering Theta As expected, there is a strong 8Hz component during REM sleep. We can filter it using the function [`nap.apply_bandpass_filter`](pynapple.process.filtering.apply_bandpass_filter). - -```{code-cell} ipython3 +```python theta_band = nap.apply_bandpass_filter(tsd_rem, cutoff=(6.0, 10.0), fs=FS) ``` We can plot the original signal and the filtered signal. - -```{code-cell} ipython3 +```python plt.figure(constrained_layout=True, figsize=(12, 3)) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) plt.plot(theta_band.restrict(ep_ex_rem)) @@ -185,8 +175,7 @@ Computing phase From the filtered signal, it is easy to get the phase using the Hilbert transform. Here we use scipy Hilbert method. - -```{code-cell} ipython3 +```python from scipy import signal theta_phase = nap.Tsd(t=theta_band.t, d=np.angle(signal.hilbert(theta_band))) @@ -194,8 +183,7 @@ theta_phase = nap.Tsd(t=theta_band.t, d=np.angle(signal.hilbert(theta_band))) Let's plot the phase. - -```{code-cell} ipython3 +```python plt.figure(constrained_layout=True, figsize=(12, 3)) plt.subplot(211) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) @@ -215,46 +203,39 @@ of each of the units using the [`compute_1d_tuning_curves`](pynapple.process.tun We will start by throwing away cells which do not have a high enough firing rate during our interval. - -```{code-cell} ipython3 +```python spikes = spikes[spikes.rate > 5.0] ``` The feature is the theta phase during REM sleep. - -```{code-cell} ipython3 -phase_modulation = nap.compute_1d_tuning_curves( - group=spikes, feature=theta_phase, nb_bins=61, minmax=(-np.pi, np.pi) +```python +phase_modulation = nap.compute_tuning_curves( + group=spikes, features=theta_phase, bins=61, range=[(-np.pi, np.pi)] ) ``` Let's plot the first 3 neurons. 
- -```{code-cell} ipython3 -plt.figure(constrained_layout=True, figsize = (12, 3)) -for i in range(3): - plt.subplot(1,3,i+1) - plt.plot(phase_modulation.iloc[:,i]) - plt.xlabel("Phase (rad)") - plt.ylabel("Firing rate (Hz)") -plt.show() +```python +phase_modulation=phase_modulation.rename({"feature0": "Phase"}) +phase_modulation.name="Firing Rate" +phase_modulation.attrs["units"]="Hz" +phase_modulation.coords["Phase"].attrs["units"]="rad" +phase_modulation[:3].plot(row="unit", col_wrap=3, sharey=False) ``` There is clearly a strong modulation for the third neuron. Finally, we can use the function [`value_from`](pynapple.Ts.value_from) to align each spikes to the corresponding phase position and overlay it with the LFP. - -```{code-cell} ipython3 +```python spike_phase = spikes[spikes.index[3]].value_from(theta_phase) ``` Let's plot it. - -```{code-cell} ipython3 +```python plt.figure(constrained_layout=True, figsize=(12, 3)) plt.subplot(211) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) @@ -274,4 +255,4 @@ Kipp Freud (https://kippfreud.com/) Guillaume Viejo -::: \ No newline at end of file +::: From b82dd3a4042391ef2481d6e0fb526a27c99f6231 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 20:00:04 +0000 Subject: [PATCH 025/244] more generalising docs for tuning curves --- doc/examples/tutorial_phase_preferences.md | 2 +- doc/user_guide/03_metadata.md | 136 +++++++++++++-------- pynapple/core/interval_set.py | 31 +++-- pynapple/core/ts_group.py | 18 +-- 4 files changed, 114 insertions(+), 73 deletions(-) diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index e42af88e7..a820fdc29 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -199,7 +199,7 @@ plt.show() Finding Phase of Spikes ----------------------- Now that we have the phase of our theta wavelet, and our spike times, we can find the phase firing preferences -of each of the units using 
the [`compute_1d_tuning_curves`](pynapple.process.tuning_curves.compute_1d_tuning_curves) function. +of each of the units using the [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves) function. We will start by throwing away cells which do not have a high enough firing rate during our interval. diff --git a/doc/user_guide/03_metadata.md b/doc/user_guide/03_metadata.md index 6c1116ce9..f23e61564 100644 --- a/doc/user_guide/03_metadata.md +++ b/doc/user_guide/03_metadata.md @@ -1,16 +1,19 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- + # Metadata Metadata can be added to `TsGroup`, `IntervalSet`, and `TsdFrame` objects at initialization or after an object has been created. - `TsGroup` metadata is information associated with each Ts/Tsd object, such as brain region or unit type. @@ -26,10 +29,10 @@ At initialization, metadata can be passed via a dictionary or pandas DataFrame u - The `rate` attribute for `TsGroup` is stored with the metadata and cannot be overwritten. ``` -The length of the metadata must match the length of the object it describes (see class examples below for more detail). +The length of the metadata must match the length of the object it describes (see class examples below for more detail). + -```{code-cell} ipython3 -:tags: [hide-cell] +```python tags=["hide-cell"] import numpy as np import pandas as pd import pynapple as nap @@ -54,7 +57,8 @@ columns = ["a", "b", "c"] ### `TsGroup` Metadata added to `TsGroup` must match the number of `Ts`/`Tsd` objects, or the length of its `index` property. 
-```{code-cell} ipython3 + +```python metadata = {"region": ["pfc", "pfc", "hpc", "hpc"]} tsgroup = nap.TsGroup(group, metadata=metadata) @@ -62,7 +66,8 @@ print(tsgroup) ``` When initializing with a DataFrame, the index must align with the input dictionary keys (only when a dictionary is used to create the `TsGroup`). -```{code-cell} ipython3 + +```python metadata = pd.DataFrame( index=group.keys(), data=["pfc", "pfc", "hpc", "hpc"], @@ -73,11 +78,10 @@ tsgroup = nap.TsGroup(group, metadata=metadata) print(tsgroup) ``` - ### `IntervalSet` -Metadata added to `IntervalSet` must match the number of intervals, or the length of its `index` property. +Metadata added to `IntervalSet` must match the number of intervals, or the length of its `index` property. -```{code-cell} ipython3 +```python metadata = { "reward": [1, 0, 1], "choice": ["left", "right", "left"], @@ -87,7 +91,8 @@ print(intervalset) ``` Metadata can be initialized as a DataFrame using the metadata argument, or it can be inferred when initializing an `IntervalSet` with a DataFrame. -```{code-cell} ipython3 + +```python df = pd.DataFrame( data=[[0, 30, 1, "left"], [35, 65, 0, "right"], [70, 100, 1, "left"]], columns=["start", "end", "reward", "choice"] @@ -98,8 +103,9 @@ print(intervalset) ``` ### `TsdFrame` -Metadata added to `TsdFrame` must match the number of data columns, or the length of its `columns` property. -```{code-cell} ipython3 +Metadata added to `TsdFrame` must match the number of data columns, or the length of its `columns` property. + +```python metadata = { "color": ["red", "blue", "green"], "position": [10,20,30], @@ -111,7 +117,8 @@ print(tsdframe) ``` When initializing with a DataFrame, the DataFrame index must match the `TsdFrame` columns. 
-```{code-cell} ipython3 + +```python metadata = pd.DataFrame( index=["a", "b", "c"], data=[["red", 10, "x"], ["blue", 20, "x"], ["green", 30, "y"]], @@ -131,45 +138,52 @@ The remaining metadata examples will be shown on a `TsGroup` object; however, al ### `set_info` Metadata can be passed as a dictionary or pandas DataFrame as the first positional argument, or metadata can be passed as name-value keyword arguments. -```{code-cell} ipython3 + +```python tsgroup.set_info(unit_type=["multi", "single", "single", "single"]) print(tsgroup) ``` ### Using dictionary-like keys (square brackets) Most metadata names can set as a dictionary-like key (i.e. using square brackets). The only exceptions are for `IntervalSet`, where the names "start" and "end" are reserved for class properties. -```{code-cell} ipython3 + +```python tsgroup["depth"] = [0, 1, 2, 3] print(tsgroup) ``` ### Using attribute assignment If the metadata name is unique from other class attributes and methods, and it is formatted properly (i.e. only alpha-numeric characters and underscores), it can be set as an attribute (i.e. using a `.` followed by the metadata name). -```{code-cell} ipython3 + +```python tsgroup.label=["MUA", "good", "good", "good"] print(tsgroup) ``` ## Allowed data types As long as the length of the metadata container matches the length of the object (number of columns for `TsdFrame` and number of indices for `IntervalSet` and `TsGroup`), elements of the metadata can be any data type. -```{code-cell} ipython3 + +```python tsgroup.coords = [[1,0],[0,1],[1,1],[2,1]] print(tsgroup) ``` ## Accessing metadata Metadata is stored as a pandas DataFrame, which can be previewed using the `metadata` attribute. 
-```{code-cell} ipython3 + +```python print(tsgroup.metadata) ``` Single metadata columns (or lists of columns) can be retrieved using the [`get_info()`](pynapple.TsGroup.get_info) class method: -```{code-cell} ipython3 + +```python print(tsgroup.get_info("region")) ``` Similarly, metadata can be accessed using key indexing (i.e. square brakets) -```{code-cell} ipython3 + +```python print(tsgroup["region"]) ``` @@ -178,13 +192,15 @@ Metadata names must be strings. Key indexing with an integer will produce differ ``` Finally, metadata that can be set as an attribute can also be accessed as an attribute. -```{code-cell} ipython3 + +```python print(tsgroup.region) ``` ## Overwriting metadata User-set metadata is mutable and can be overwritten. -```{code-cell} ipython3 + +```python print(tsgroup, "\n") tsgroup.set_info(label=["A", "B", "C", "D"]) print(tsgroup) @@ -192,7 +208,8 @@ print(tsgroup) ## Dropping metadata To drop metadata, use the [`drop_info()`](pynapple.TsGroup.drop_info) method. Multiple metadata columns can be dropped by passing a list of metadata names. -```{code-cell} ipython3 + +```python print(tsgroup, "\n") tsgroup.drop_info("coords") print(tsgroup) @@ -200,67 +217,80 @@ print(tsgroup) ## Using metadata to slice objects Metadata can be used to slice or filter objects based on metadata values. -```{code-cell} ipython3 + +```python print(tsgroup[tsgroup.label == "A"]) ``` ## `groupby`: Using metadata to group objects Similar to pandas, metadata can be used to group objects based on one or more metadata columns using the object method [`groupby`](pynapple.TsGroup.groupby), where the first argument is the metadata columns name(s) to group by. This function returns a dictionary with keys corresponding to unique groups and values corresponding to object indices belonging to each group. -```{code-cell} ipython3 + +```python print(tsgroup,"\n") print(tsgroup.groupby("region")) ``` Grouping by multiple metadata columns should be passed as a list. 
-```{code-cell} ipython3 + +```python tsgroup.groupby(["region","unit_type"]) ``` The optional argument `get_group` can be provided to return a new object corresponding to a specific group. -```{code-cell} ipython3 + +```python tsgroup.groupby("region", get_group="hpc") ``` ## `groupby_apply`: Applying functions to object groups The `groupby_apply` object method allows a specific function to be applied to object groups. The first argument, same as `groupby`, is the metadata column(s) used to group the object. The second argument is the function to apply to each group. If only these two arguments are supplied, it is assumed that the grouped object is the first and only input to the applied function. This function returns a dictionary, where keys correspond to each unique group, and values correspond to the function output on each group. -```{code-cell} ipython3 + +```python print(tsdframe,"\n") print(tsdframe.groupby_apply("label", np.mean)) ``` If the applied function requires additional inputs, these can be passed as additional keyword arguments into `groupby_apply`. -```{code-cell} ipython3 + +```python feature = nap.Tsd(t=np.arange(100), d=np.repeat([0,1], 50)) tsgroup.groupby_apply( "region", - nap.compute_1d_tuning_curves, - feature=feature, - nb_bins=2) + nap.compute_tuning_curves, + features=feature, + bins=2) ``` Alternatively, an anonymous function can be passed instead that defines additional arguments. -```{code-cell} ipython3 -func = lambda x: nap.compute_1d_tuning_curves(x, feature=feature, nb_bins=2) + +```python +func = lambda x: nap.compute_tuning_curves(x, features=feature, bins=2) tsgroup.groupby_apply("region", func) ``` An anonymous function can also be used to apply a function where the grouped object is not the first input. 
-```{code-cell} ipython3 -func = lambda x: nap.compute_1d_tuning_curves( + +```python +func = lambda x: nap.compute_tuning_curves( group=tsgroup, - feature=feature, - nb_bins=2, - ep=x) + features=feature, + bins=2, + epochs=x) intervalset.groupby_apply("choice", func) ``` Alternatively, the optional parameter `input_key` can be passed to specify which keyword argument the grouped object corresponds to. Other required arguments of the applied function need to be passed as keyword arguments. -```{code-cell} ipython3 + +```python intervalset.groupby_apply( "choice", - nap.compute_1d_tuning_curves, - input_key="ep", + nap.compute_tuning_curves, + input_key="epochs", group=tsgroup, - feature=feature, - nb_bins=2) + features=feature, + bins=2) +``` + +```python + ``` diff --git a/pynapple/core/interval_set.py b/pynapple/core/interval_set.py index 59e51b835..5bdbaf3e1 100644 --- a/pynapple/core/interval_set.py +++ b/pynapple/core/interval_set.py @@ -1266,7 +1266,7 @@ def groupby_apply(self, by, func, input_key=None, **func_kwargs): Apply a numpy function: >>> ep.groupby_apply("l2", np.mean) - {'x': 6.75, 'y': 26.5} + {'x': np.float64(6.75), 'y': np.float64(26.5)} Apply a custom function: @@ -1289,16 +1289,23 @@ def groupby_apply(self, by, func, input_key=None, **func_kwargs): ... ) >>> feature = nap.Tsd(t=np.arange(40), d=np.concatenate([np.zeros(20), np.ones(20)])) >>> func_kwargs = { - >>> "group": tsg, - >>> "feature": feature, - >>> "nb_bins": 2, - >>> } - >>> ep.groupby_apply("l2", nap.compute_1d_tuning_curves, input_key="ep", **func_kwargs) - {'x': 1 2 3 - 0.25 1.025641 1.823362 4.216524 - 0.75 NaN NaN NaN, - 'y': 1 2 3 - 0.25 NaN NaN NaN - 0.75 1.025641 1.978022 4.835165} + ... "group": tsg, + ... "features": feature, + ... "bins": 2, + ... } + >>> ep.groupby_apply("l2", nap.compute_tuning_curves, input_key="epochs", **func_kwargs) + {'x': Size: 48B + array([[ nan, 1. 
], + [ nan, 1.77777778], + [ nan, 4.11111111]]) + Coordinates: + * unit (unit) int64 24B 1 2 3 + * feature0 (feature0) float64 16B -0.25 0.25, 'y': Size: 48B + array([[ nan, 1. ], + [ nan, 1.92857143], + [ nan, 4.71428571]]) + Coordinates: + * unit (unit) int64 24B 1 2 3 + * feature0 (feature0) float64 16B 0.75 1.25} """ return _MetadataMixin.groupby_apply(self, by, func, input_key, **func_kwargs) diff --git a/pynapple/core/ts_group.py b/pynapple/core/ts_group.py index 4f3a3667b..a4c77c6b6 100644 --- a/pynapple/core/ts_group.py +++ b/pynapple/core/ts_group.py @@ -1925,12 +1925,16 @@ def groupby_apply(self, by, func, input_key=None, **func_kwargs): ... d=np.concatenate([np.zeros(20), np.ones(20)]), ... time_support=nap.IntervalSet(np.array([[0, 5], [10, 12], [20, 33]])), ... ) - >>> tsgroup.groupby_apply("l2", nap.compute_1d_tuning_curves, feature=feature, nb_bins=2) - {'x': 0 1 - 0.25 1.15 2.044444 - 0.75 1.15 2.217857, - 'y': 2 - 0.25 3.833333 - 0.75 4.353571} + >>> print(tsgroup.groupby_apply("l2", nap.compute_tuning_curves, features=feature, bins=2)) + {'x': Size: 32B + array([[1. , 1. 
], + [1.77777778, 1.92857143]]) + Coordinates: + * unit (unit) int64 16B 0 1 + * feature0 (feature0) float64 16B 0.25 0.75, 'y': Size: 16B + array([[3.33333333, 3.78571429]]) + Coordinates: + * unit (unit) int64 8B 2 + * feature0 (feature0) float64 16B 0.25 0.75} """ return _MetadataMixin.groupby_apply(self, by, func, input_key, **func_kwargs) From 2b1398fe96dc881360b5638b56ef880ee2726036 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 20:05:23 +0000 Subject: [PATCH 026/244] dandi tutorial --- doc/examples/tutorial_pynapple_dandi.md | 66 +++++++++++-------------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/doc/examples/tutorial_pynapple_dandi.md b/doc/examples/tutorial_pynapple_dandi.md index 2efd85aeb..ae7510baa 100644 --- a/doc/examples/tutorial_pynapple_dandi.md +++ b/doc/examples/tutorial_pynapple_dandi.md @@ -1,17 +1,18 @@ --- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.4 -kernelspec: - display_name: Python 3 - language: python - name: python3 +jupyter: + jupytext: + default_lexer: ipython3 + text_representation: + extension: .md + format_name: markdown + format_version: '1.3' + jupytext_version: 1.17.2 + kernelspec: + display_name: pynapple + language: python + name: python3 --- - Streaming data from DANDI ========================= @@ -30,8 +31,7 @@ DANDI ----- DANDI allows you to stream data without downloading all the files. In this case the data extracted from the NWB file are stored in the nwb-cache folder. - -```{code-cell} ipython3 +```python from pynwb import NWBHDF5IO from dandi.dandiapi import DandiAPIClient @@ -71,8 +71,7 @@ Pynapple -------- If opening the NWB works, you can start streaming data straight into pynapple with the `NWBFile` class. - -```{code-cell} ipython3 +```python import pynapple as nap import matplotlib.pyplot as plt import seaborn as sns @@ -88,8 +87,7 @@ print(nwb) We can load the spikes as a TsGroup for inspection. 
- -```{code-cell} ipython3 +```python units = nwb["units"] print(units) @@ -97,36 +95,28 @@ print(units) As well as the position - -```{code-cell} ipython3 +```python position = nwb["SpatialSeriesLED1"] ``` Here we compute the 2d tuning curves - -```{code-cell} ipython3 -tc, binsxy = nap.compute_2d_tuning_curves(units, position, 20) +```python +tuning_curves = nap.compute_tuning_curves(units, position, 20) ``` Let's plot the tuning curves - -```{code-cell} ipython3 -plt.figure(figsize=(15, 7)) -for i in tc.keys(): - plt.subplot(2, 4, i + 1) - plt.imshow(tc[i], origin="lower", aspect="auto") - plt.title("Unit {}".format(i)) -plt.tight_layout() -plt.show() +```python +tuning_curves.name="Firing Rate" +tuning_curves.attrs["units"] = "Hz" +tuning_curves.plot(row="unit", col_wrap=4, figsize=(15, 7)) ``` -Let's plot the spikes of unit 1 who has a nice grid -Here I use the function [`value_from`](pynapple.Ts.value_from) to assign to each spike the closest position in time. - +Let's plot the spikes of unit 1, which has a nice grid. +Here, I use the [`value_from`](pynapple.Ts.value_from) function to assign to each spike the closest position in time. 
-```{code-cell} ipython3 +```python plt.figure(figsize=(15, 6)) plt.subplot(121) extent = ( @@ -135,7 +125,7 @@ extent = ( np.min(position["y"]), np.max(position["y"]), ) -plt.imshow(tc[1], origin="lower", extent=extent, aspect="auto") +plt.imshow(tuning_curves[1], origin="lower", extent=extent, aspect="auto") plt.xlabel("x") plt.ylabel("y") @@ -148,3 +138,7 @@ plt.ylabel("y") plt.tight_layout() plt.show() ``` + +```python + +``` From a50eb9932473aa1e2f6e33892f5e6dc23c630580 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 9 Jul 2025 20:25:07 +0000 Subject: [PATCH 027/244] allow passing a tuple range when 1D --- README.md | 2 +- doc/examples/tutorial_HD_dataset.md | 2 +- doc/examples/tutorial_phase_preferences.md | 2 +- doc/user_guide/06_tuning_curves.md | 4 ++-- doc/user_guide/07_decoding.md | 2 +- main.py | 2 +- pynapple/process/tuning_curves.py | 9 +++++++++ tests/test_decoding.py | 2 +- tests/test_tuning_curves_general.py | 22 ++++++++++++++++++++++ 9 files changed, 39 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 6771ead2e..bfcbd7a8a 100644 --- a/README.md +++ b/README.md @@ -131,7 +131,7 @@ wake_ep = data["position_time_support"] # COMPUTING TUNING CURVES tuning_curves = nap.compute_tuning_curves( - spikes, head_direction, 120, epochs=wake_ep, range=[(0, 2 * np.pi)] + spikes, head_direction, 120, epochs=wake_ep, range=(0, 2 * np.pi) ) # PLOT diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 509b73094..184692d46 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -108,7 +108,7 @@ tuning_curves = nap.compute_tuning_curves( features=angle, bins=61, epochs=epochs[epochs.tags == "wake"], - range=[(0, 2 * np.pi)] + range=(0, 2 * np.pi) ) ``` diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index a820fdc29..31b2418fa 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ 
b/doc/examples/tutorial_phase_preferences.md @@ -211,7 +211,7 @@ The feature is the theta phase during REM sleep. ```python phase_modulation = nap.compute_tuning_curves( - group=spikes, features=theta_phase, bins=61, range=[(-np.pi, np.pi)] + group=spikes, features=theta_phase, bins=61, range=(-np.pi, np.pi) ) ``` diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 3a0edd84e..177ca40a2 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -112,7 +112,7 @@ tuning_curves_1d = nap.compute_tuning_curves( group=tsgroup, features=feature, bins=120, - range=[(0, 2*np.pi)] + range=(0, 2*np.pi) ) tuning_curves_1d ``` @@ -261,7 +261,7 @@ tuning_curves_1d = nap.compute_tuning_curves( group=tsdframe, features=feature, bins=120, - range=[(0, 2*np.pi)] + range=(0, 2*np.pi) ) tuning_curves_1d ``` diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index da7bcc8e9..baab285cd 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -69,7 +69,7 @@ To decode, we need to compute tuning curves in 1D. 
```python tuning_curves_1d = nap.compute_tuning_curves( - tsgroup, feature, bins=61, range=[(0, 2 * np.pi)] + tsgroup, feature, bins=61, range=(0, 2 * np.pi) ) ``` diff --git a/main.py b/main.py index 9347539ab..f64a2a332 100644 --- a/main.py +++ b/main.py @@ -17,7 +17,7 @@ # COMPUTING TUNING CURVES tuning_curves = nap.compute_tuning_curves( - spikes, head_direction, 120, epochs=wake_ep, range=[(0, 2 * np.pi)] + spikes, head_direction, 120, epochs=wake_ep, range=(0, 2 * np.pi) ) # PLOT diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index bb954554b..dbd12d5c2 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -156,6 +156,15 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= if not isinstance(fs, (int, float)): raise TypeError("fs should be a number (int or float)") + # check range + if range is not None and isinstance(range, tuple): + if features.shape[1] == 1: + range = [range] + else: + raise ValueError( + "range should be a sequence of tuples, one for each feature." 
+ ) + # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 05d0ed08b..421ac4e52 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -19,7 +19,7 @@ def get_testing_set_1d(): group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) tc = ( nap.compute_tuning_curves( - group=group, features=feature, bins=2, range=[(-0.5, 1.5)] + group=group, features=feature, bins=2, range=(-0.5, 1.5) ) .to_pandas() .T diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index cac6832ce..99c7a9d16 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -119,6 +119,28 @@ def get_features(n, fs=10.0): {"epochs": nap.IntervalSet([0.0, 1000.0])}, does_not_raise(), ), + # range + ( + get_group(1), + get_features(2), + {"range": (0, 1)}, + pytest.raises( + ValueError, + match="range should be a sequence of tuples, one for each feature.", + ), + ), + ( + get_group(1), + get_features(1), + {"range": (0, 1)}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"range": [(0, 1)]}, + does_not_raise(), + ), # fs ( get_group(1), From d11867539167119d8617274f488d2174f60c7058 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 15:13:55 +0000 Subject: [PATCH 028/244] cleaning decode --- pynapple/process/decoding.py | 176 +++++++++++++++-------------------- 1 file changed, 77 insertions(+), 99 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 430e4403a..93507ee9e 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -264,9 +264,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N return decoded, p -def decode( - tuning_curves, tuning_curve_bins, group, ep, bin_size, time_units="s", features=None -): +def decode(tuning_curves, group, epochs, bin_size, time_units="s", 
features=None): """ Performs Bayesian decoding over n-dimensional features. @@ -279,15 +277,13 @@ def decode( Parameters ---------- - tuning_curves : dict - Dictionary of tuning curves (one for each neuron). - tuning_curve_bins : list - List of lists containing the bin positions for each dimension of the tuning curves. + tuning_curves : xr.DataArray + Tuning curves as outputed by `compute_tuning_curves` (one for each neuron). group : TsGroup, TsdFrame or dict of Ts/Tsd object. A group of neurons with the same keys as tuning_curves dictionary. You may also pass a TsdFrame with smoothed rates (recommended). - ep : IntervalSet - The epoch on which decoding is computed + epochs : IntervalSet + The epochs on which decoding is computed bin_size : float Bin size. Default is second. Use the parameter time_units to change it. time_units : str, optional @@ -313,70 +309,52 @@ def decode( """ - if type(group) is nap.TsdFrame: - newgroup = group.restrict(ep) - numcells = newgroup.shape[1] + # check group + if isinstance(group, (dict, nap.TsGroup)): + numcells = len(group) - if len(tuning_curves) != numcells: + if tuning_curves.coords.dims["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") - if not np.all( - np.array(list(tuning_curves.keys())) == np.array(newgroup.columns) - ): + if not np.all(tuning_curves.coords["unit"] == np.array(group.keys())): raise RuntimeError("Different indices for tuning curves and group keys") - count = group - - elif type(group) is nap.TsGroup: - newgroup = group.restrict(ep) - numcells = len(newgroup) + if isinstance(group, dict): + newgroup = nap.TsGroup(group, time_support=epochs) + count = newgroup.count(bin_size, epochs, time_units) + elif isinstance(group, nap.TsdFrame): + numcells = newgroup.shape[1] - if len(tuning_curves) != numcells: + if tuning_curves.coords.dims["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") - if not np.all( - 
np.array(list(tuning_curves.keys())) == np.array(newgroup.keys()) - ): + if not np.all(tuning_curves.coords["unit"] == np.array(group.keys())): raise RuntimeError("Different indices for tuning curves and group keys") - count = newgroup.count(bin_size, ep, time_units) - - elif type(group) is dict: - newgroup = nap.TsGroup(group, time_support=ep) - count = newgroup.count(bin_size, ep, time_units) - + count = group + newgroup = group.restrict(epochs) else: raise RuntimeError("Unknown format for group") - if not isinstance(tuning_curve_bins, list | np.ndarray): - raise RuntimeError( - "tuning_curve_bins should be a list or array of feature bins." - ) - tuning_curve_bins = np.array(tuning_curve_bins) - if len(tuning_curve_bins) != list(tuning_curves.values())[0].ndim: - raise RuntimeError("Tuning curve shape and tuning curve bins do not match.") - - indexes = list(tuning_curves.keys()) - - # Occupancy + # occupancy if features is None: - occupancy = np.ones_like(tuning_curves[indexes[0]]).flatten() - elif isinstance(features, nap.TsdFrame | nap.Tsd): + occupancy = np.ones_like(tuning_curves[0]).flatten() + elif isinstance(features, (nap.TsdFrame, nap.Tsd)): if isinstance(features, nap.Tsd): - features = nap.TsdFrame(t=features.times(), d=features.values) - if features.ndim == 1: - features = features[:, np.newaxis] - if len(tuning_curve_bins) != features.shape[1]: - raise RuntimeError("Number of features and tuning_curve_bins do not match.") + features = nap.TsdFrame( + t=features.times(), d=features.values, time_support=epochs + ) + if tuning_curves.ndim - 1 != features.shape[1]: + raise RuntimeError("Number of features and tuning_curves do not match.") bins = [] - for i in range(len(tuning_curve_bins)): - diff = np.diff(tuning_curve_bins[i]) - _bins = tuning_curve_bins[i][:-1] - diff / 2 - _bins = np.hstack( - (_bins, [_bins[-1] + diff[-1], _bins[-1] + 2 * diff[-1]]) - ) # assuming the size of the last 2 bins is equal - bins.append(_bins) + # for i in 
range(len(tuning_curve_bins)): + # diff = np.diff(tuning_curve_bins[i]) + # _bins = tuning_curve_bins[i][:-1] - diff / 2 + # _bins = np.hstack( + # (_bins, [_bins[-1] + diff[-1], _bins[-1] + 2 * diff[-1]]) + # ) # assuming the size of the last 2 bins is equal + # bins.append(_bins) occupancy, _ = np.histogramdd(features, bins) occupancy = occupancy.flatten() @@ -402,46 +380,46 @@ def decode( p = p1 * p2 * p3 p = p / p.sum(1)[:, np.newaxis] - idxmax = np.argmax(p, 1) - - n_bins_per_feature = [ - tuning_curve_bins[i].shape[0] for i in range(len(tuning_curve_bins)) - ] - p = p.reshape( - p.shape[0], - *n_bins_per_feature, - ) - p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( - t=count.index, - d=p, - time_support=ep, - ) - - idxmax = np.unravel_index(idxmax, n_bins_per_feature) - - if features is not None: - cols = features.columns - else: - cols = np.arange(len(tuning_curve_bins)) - - if len(tuning_curve_bins) == 1: - decoded = nap.Tsd( - t=count.index, - d=tuning_curve_bins[0][idxmax[0]], - time_support=ep, - ) - else: - decoded = nap.TsdFrame( - t=count.index, - d=np.stack( - [ - tuning_curve_bins[i][idxmax[i]] - for i in range(len(tuning_curve_bins)) - ], - axis=1, - ), - time_support=ep, - columns=cols, - ) - - return decoded, p + # idxmax = np.argmax(p, 1) + + # n_bins_per_feature = [ + # tuning_curve_bins[i].shape[0] for i in range(len(tuning_curve_bins)) + # ] + # p = p.reshape( + # p.shape[0], + # *n_bins_per_feature, + # ) + # p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( + # t=count.index, + # d=p, + # time_support=ep, + # ) + + # idxmax = np.unravel_index(idxmax, n_bins_per_feature) + + # if features is not None: + # cols = features.columns + # else: + # cols = np.arange(len(tuning_curve_bins)) + + # if len(tuning_curve_bins) == 1: + # decoded = nap.Tsd( + # t=count.index, + # d=tuning_curve_bins[0][idxmax[0]], + # time_support=ep, + # ) + # else: + # decoded = nap.TsdFrame( + # t=count.index, + # d=np.stack( + # [ + # 
tuning_curve_bins[i][idxmax[i]] + # for i in range(len(tuning_curve_bins)) + # ], + # axis=1, + # ), + # time_support=ep, + # columns=cols, + # ) + + return p From 7cff2b86f100075464b88b2f1354160e9471be72 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 16:50:33 +0000 Subject: [PATCH 029/244] integrate xarray --- pynapple/process/decoding.py | 116 ++++++++++---------- tests/test_decode_general.py | 199 ----------------------------------- tests/test_decoding.py | 110 +++++++++---------- 3 files changed, 104 insertions(+), 321 deletions(-) delete mode 100644 tests/test_decode_general.py diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 93507ee9e..545d79910 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -313,26 +313,25 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None if isinstance(group, (dict, nap.TsGroup)): numcells = len(group) - if tuning_curves.coords.dims["unit"] != numcells: + if tuning_curves.coords.sizes["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") - if not np.all(tuning_curves.coords["unit"] == np.array(group.keys())): + if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): raise RuntimeError("Different indices for tuning curves and group keys") if isinstance(group, dict): - newgroup = nap.TsGroup(group, time_support=epochs) - count = newgroup.count(bin_size, epochs, time_units) + group = nap.TsGroup(group, time_support=epochs) + count = group.count(bin_size, epochs, time_units) elif isinstance(group, nap.TsdFrame): - numcells = newgroup.shape[1] + numcells = group.shape[1] - if tuning_curves.coords.dims["unit"] != numcells: + if tuning_curves.coords.sizes["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") - if not np.all(tuning_curves.coords["unit"] == np.array(group.keys())): + if not np.all(tuning_curves.coords["unit"] == group.columns): raise 
RuntimeError("Different indices for tuning curves and group keys") count = group - newgroup = group.restrict(epochs) else: raise RuntimeError("Unknown format for group") @@ -348,23 +347,25 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None raise RuntimeError("Number of features and tuning_curves do not match.") bins = [] - # for i in range(len(tuning_curve_bins)): - # diff = np.diff(tuning_curve_bins[i]) - # _bins = tuning_curve_bins[i][:-1] - diff / 2 - # _bins = np.hstack( - # (_bins, [_bins[-1] + diff[-1], _bins[-1] + 2 * diff[-1]]) - # ) # assuming the size of the last 2 bins is equal - # bins.append(_bins) - + for dim in tuning_curves.dims[1:]: + centers = tuning_curves.coords[dim].values + diffs = np.diff(centers) + edges = centers[:-1] - diffs / 2 + bins.append( + np.concatenate( + ( + edges, + [edges[-1] + diffs[-1], edges[-1] + 2 * diffs[-1]], + ) + ) + ) occupancy, _ = np.histogramdd(features, bins) occupancy = occupancy.flatten() else: - raise RuntimeError("Features should be a TsdFrame.") + raise RuntimeError("Features should be a TsdFrame or Tsd.") # Transforming to pure numpy array - tc = np.array([tuning_curves[i] for i in tuning_curves.keys()]) - tc = tc.reshape(tc.shape[0], np.prod(tc.shape[1:])) - tc = tc.T + tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = count.values bin_size_s = nap.TsIndex.format_timestamps( np.array([bin_size], dtype=np.float64), time_units @@ -380,46 +381,35 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None p = p1 * p2 * p3 p = p / p.sum(1)[:, np.newaxis] - # idxmax = np.argmax(p, 1) - - # n_bins_per_feature = [ - # tuning_curve_bins[i].shape[0] for i in range(len(tuning_curve_bins)) - # ] - # p = p.reshape( - # p.shape[0], - # *n_bins_per_feature, - # ) - # p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( - # t=count.index, - # d=p, - # time_support=ep, - # ) - - # idxmax = np.unravel_index(idxmax, n_bins_per_feature) 
- - # if features is not None: - # cols = features.columns - # else: - # cols = np.arange(len(tuning_curve_bins)) - - # if len(tuning_curve_bins) == 1: - # decoded = nap.Tsd( - # t=count.index, - # d=tuning_curve_bins[0][idxmax[0]], - # time_support=ep, - # ) - # else: - # decoded = nap.TsdFrame( - # t=count.index, - # d=np.stack( - # [ - # tuning_curve_bins[i][idxmax[i]] - # for i in range(len(tuning_curve_bins)) - # ], - # axis=1, - # ), - # time_support=ep, - # columns=cols, - # ) - - return p + idxmax = np.argmax(p, 1) + + p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) + p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( + t=count.index, + d=p, + time_support=epochs, + ) + + idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) + + if tuning_curves.ndim == 2: + decoded = nap.Tsd( + t=count.index, + d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, + time_support=epochs, + ) + else: + decoded = nap.TsdFrame( + t=count.index, + d=np.stack( + [ + tuning_curves.coords[dim][idxmax[i]] + for i, dim in enumerate(tuning_curves.dims[1:]) + ], + axis=1, + ), + time_support=epochs, + columns=tuning_curves.dims[1:], + ) + + return decoded, p diff --git a/tests/test_decode_general.py b/tests/test_decode_general.py deleted file mode 100644 index 212149db4..000000000 --- a/tests/test_decode_general.py +++ /dev/null @@ -1,199 +0,0 @@ -import numpy as np -import pytest - -import pynapple as nap - - -def get_testing_set_1d(): - feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) - group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) - tc = nap.compute_1d_tuning_curves( - group=group, feature=feature, nb_bins=2, minmax=(-0.5, 1.5) - ) - tc_bins = tc.index.values[None, :] - tc = {i: tc[i] for i in tc.columns} - ep = nap.IntervalSet(start=0, end=100) - return feature, group, tc, tc_bins, ep - - -def test_decode_1d(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - decoded, proba = 
nap.decode(tc, tc_bins, group, ep, bin_size=1) - assert isinstance(decoded, nap.Tsd) - assert isinstance(proba, nap.TsdFrame) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.ones((100, 2)) - tmp[50:, 0] = 0.0 - tmp[0:50, 1] = 0.0 - np.testing.assert_array_almost_equal(proba.values, tmp) - - -def test_decode_1d_with_TsdFrame(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - count = group.count(bin_size=1, ep=ep) - decoded, proba = nap.decode(tc, tc_bins, count, ep, bin_size=1) - assert isinstance(decoded, nap.Tsd) - assert isinstance(proba, nap.TsdFrame) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.ones((100, 2)) - tmp[50:, 0] = 0.0 - tmp[0:50, 1] = 0.0 - np.testing.assert_array_almost_equal(proba.values, tmp) - - -def test_decode_1d_with_feature(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - decoded, proba = nap.decode(tc, tc_bins, group, ep, bin_size=1, features=feature) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert isinstance(decoded, nap.Tsd) - assert isinstance(proba, nap.TsdFrame) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.ones((100, 2)) - tmp[50:, 0] = 0.0 - tmp[0:50, 1] = 0.0 - np.testing.assert_array_almost_equal(proba.values, tmp) - - -def test_decode_1d_with_dict(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - group = dict(group) - decoded, proba = nap.decode(tc, tc_bins, group, ep, bin_size=1, features=feature) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert isinstance(decoded, nap.Tsd) - assert isinstance(proba, nap.TsdFrame) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.ones((100, 2)) 
- tmp[50:, 0] = 0.0 - tmp[0:50, 1] = 0.0 - np.testing.assert_array_almost_equal(proba.values, tmp) - - -def test_decode_1d_with_wrong_feature(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - with pytest.raises(RuntimeError) as e_info: - nap.decode(tc, tc_bins, group, ep, bin_size=1, features=[1, 2, 3]) - assert str(e_info.value) == "Features should be a TsdFrame." - - -def test_decode_with_time_units(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): - decoded, proba = nap.decode(tc, tc_bins, group, ep, 1.0 * t, time_units=tu) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - - -def test_decoded_raise_errors(): - feature, group, tc, tc_bins, ep = get_testing_set_1d() - with pytest.raises(Exception) as e_info: - nap.decode(tc, tc_bins, np.random.rand(10), ep, 1) - assert str(e_info.value) == "Unknown format for group" - - tc[2] = np.random.rand(2) - with pytest.raises(Exception) as e_info: - nap.decode(tc, tc_bins, group, ep, 1) - assert str(e_info.value) == "Different shapes for tuning_curves and group" - - feature, group, tc, tc_bins, ep = get_testing_set_1d() - tc = {k: values for k, values in zip(list(tc.keys())[::-1], tc.values())} - with pytest.raises(Exception) as e_info: - nap.decode(tc, tc_bins, group, ep, 1) - assert str(e_info.value) == "Different indices for tuning curves and group keys" - - -def get_testing_set_2d(): - features = nap.TsdFrame( - t=np.arange(0, 100, 1), - d=np.vstack( - (np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50)) - ).T.astype(np.float64), - ) - group = nap.TsGroup( - { - 0: nap.Ts(np.arange(0, 50, 2)), - 1: nap.Ts(np.arange(1, 51, 2)), - 2: nap.Ts(np.arange(50, 100, 2)), - 3: nap.Ts(np.arange(51, 101, 2)), - } - ) - - tc, tc_bins = nap.compute_2d_tuning_curves( - group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) - ) - ep = nap.IntervalSet(start=0, end=100) - return features, group, tc, tc_bins, ep - 
- -def test_decode_2d(): - features, group, tc, tc_bins, ep = get_testing_set_2d() - decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) - - assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, nap.TsdTensor) - np.testing.assert_array_almost_equal(features.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.zeros((100, 2)) - tmp[0:50:2, 0] = 1 - tmp[50:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) - - tmp = np.zeros((100, 2)) - tmp[1:50:2, 0] = 1 - tmp[51:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) - - -def test_decode_2d_with_TsdFrame(): - features, group, tc, tc_bins, ep = get_testing_set_2d() - count = group.count(bin_size=1, ep=ep) - decoded, proba = nap.decode(tc, tc_bins, count, ep, 1) - - assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, nap.TsdTensor) - np.testing.assert_array_almost_equal(features.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.zeros((100, 2)) - tmp[0:50:2, 0] = 1 - tmp[50:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) - - tmp = np.zeros((100, 2)) - tmp[1:50:2, 0] = 1 - tmp[51:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) - - -def test_decode_2d_with_dict(): - features, group, tc, tc_bins, ep = get_testing_set_2d() - group = dict(group) - decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) - - assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, nap.TsdTensor) - np.testing.assert_array_almost_equal(features.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.zeros((100, 2)) - tmp[0:50:2, 0] = 1 - tmp[50:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 0], tmp) - - tmp = np.zeros((100, 2)) - tmp[1:50:2, 0] = 1 - tmp[51:100:2, 1] = 1 - np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) - - -def test_decode_2d_with_feature(): - features, 
group, tc, tc_bins, ep = get_testing_set_2d() - decoded, proba = nap.decode(tc, tc_bins, group, ep, 1) - np.testing.assert_array_almost_equal(features.values, decoded.values) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 421ac4e52..8b2ab07b0 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -8,8 +8,8 @@ """Tests of decoding for `pynapple` package.""" import numpy as np -import pandas as pd import pytest +import xarray as xr import pynapple as nap @@ -17,20 +17,16 @@ def get_testing_set_1d(): feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) - tc = ( - nap.compute_tuning_curves( - group=group, features=feature, bins=2, range=(-0.5, 1.5) - ) - .to_pandas() - .T + tc = nap.compute_tuning_curves( + group=group, features=feature, bins=2, range=(-0.5, 1.5) ) - ep = nap.IntervalSet(start=0, end=100) - return feature, group, tc, ep + epochs = nap.IntervalSet(start=0, end=100) + return feature, group, tc, epochs def test_decode_1d(): - feature, group, tc, ep = get_testing_set_1d() - decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1) + feature, group, tc, epochs = get_testing_set_1d() + decoded, proba = nap.decode(tc, group, epochs, bin_size=1) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -43,9 +39,9 @@ def test_decode_1d(): def test_decode_1d_with_TsdFrame(): - feature, group, tc, ep = get_testing_set_1d() - count = group.count(bin_size=1, ep=ep) - decoded, proba = nap.decode_1d(tc, count, ep, bin_size=1) + feature, group, tc, epochs = get_testing_set_1d() + count = group.count(bin_size=1, ep=epochs) + decoded, proba = nap.decode(tc, count, epochs, bin_size=1) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -58,9 +54,8 @@ def 
test_decode_1d_with_TsdFrame(): def test_decode_1d_with_feature(): - feature, group, tc, ep = get_testing_set_1d() - decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1, feature=feature) - np.testing.assert_array_almost_equal(feature.values, decoded.values) + feature, group, tc, epochs = get_testing_set_1d() + decoded, proba = nap.decode(tc, group, epochs, bin_size=1, features=feature) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -73,10 +68,9 @@ def test_decode_1d_with_feature(): def test_decode_1d_with_dict(): - feature, group, tc, ep = get_testing_set_1d() + feature, group, tc, epochs = get_testing_set_1d() group = dict(group) - decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1, feature=feature) - np.testing.assert_array_almost_equal(feature.values, decoded.values) + decoded, proba = nap.decode(tc, group, epochs, bin_size=1, features=feature) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -89,35 +83,35 @@ def test_decode_1d_with_dict(): def test_decode_1d_with_wrong_feature(): - feature, group, tc, ep = get_testing_set_1d() + feature, group, tc, epochs = get_testing_set_1d() with pytest.raises(RuntimeError) as e_info: - nap.decode_1d(tc, group, ep, bin_size=1, feature=[1, 2, 3]) - assert str(e_info.value) == "Unknown format for feature in decode_1d" + nap.decode(tc, group, epochs, bin_size=1, features=[1, 2, 3]) + assert str(e_info.value) == "Features should be a TsdFrame or Tsd." 
def test_decode_1d_with_time_units(): - feature, group, tc, ep = get_testing_set_1d() + feature, group, tc, epochs = get_testing_set_1d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): - decoded, proba = nap.decode_1d(tc, group, ep, 1.0 * t, time_units=tu) + decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) np.testing.assert_array_almost_equal(feature.values, decoded.values) def test_decoded_1d_raise_errors(): - feature, group, tc, ep = get_testing_set_1d() + feature, group, tc, epochs = get_testing_set_1d() with pytest.raises(Exception) as e_info: - nap.decode_1d(tc, np.random.rand(10), ep, 1) + nap.decode(tc, np.random.rand(10), epochs, 1) assert str(e_info.value) == "Unknown format for group" - feature, group, tc, ep = get_testing_set_1d() - tc[2] = np.random.rand(2) + feature, group, tc, epochs = get_testing_set_1d() + _tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) with pytest.raises(Exception) as e_info: - nap.decode_1d(tc, group, ep, 1) + nap.decode(_tc, group, epochs, 1) assert str(e_info.value) == "Different shapes for tuning_curves and group" - feature, group, tc, ep = get_testing_set_1d() - tc.columns = [0, 2] + feature, group, tc, epochs = get_testing_set_1d() + tc.coords["unit"] = [0, 2] with pytest.raises(Exception) as e_info: - nap.decode_1d(tc, group, ep, 1) + nap.decode(tc, group, epochs, 1) assert str(e_info.value) == "Different indices for tuning curves and group keys" @@ -138,18 +132,16 @@ def get_testing_set_2d(): tc = nap.compute_tuning_curves( group=group, features=features, bins=2, range=[(-0.5, 1.5), (-0.5, 1.5)] ) - xy = [tc.coords[dim].values for dim in tc.coords if dim != "unit"] - tc = {c: tc.sel(unit=c).values for c in tc.coords["unit"].values} - ep = nap.IntervalSet(start=0, end=100) - return features, group, tc, ep, tuple(xy) + epochs = nap.IntervalSet(start=0, end=100) + return features, group, tc, epochs def test_decode_2d(): - features, group, tc, ep, xy = get_testing_set_2d() - 
decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) + features, group, tc, epochs = get_testing_set_2d() + decoded, proba = nap.decode(tc, group, epochs, 1) assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, np.ndarray) + assert isinstance(proba, nap.TsdTensor) np.testing.assert_array_almost_equal(features.values, decoded.values) assert len(decoded) == 100 assert len(proba) == 100 @@ -165,12 +157,12 @@ def test_decode_2d(): def test_decode_2d_with_TsdFrame(): - features, group, tc, ep, xy = get_testing_set_2d() - count = group.count(bin_size=1, ep=ep) - decoded, proba = nap.decode_2d(tc, count, ep, 1, xy) + features, group, tc, epochs = get_testing_set_2d() + count = group.count(bin_size=1, ep=epochs) + decoded, proba = nap.decode(tc, count, epochs, 1) assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, np.ndarray) + assert isinstance(proba, nap.TsdTensor) np.testing.assert_array_almost_equal(features.values, decoded.values) assert len(decoded) == 100 assert len(proba) == 100 @@ -186,12 +178,12 @@ def test_decode_2d_with_TsdFrame(): def test_decode_2d_with_dict(): - features, group, tc, ep, xy = get_testing_set_2d() + features, group, tc, epochs = get_testing_set_2d() group = dict(group) - decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) + decoded, proba = nap.decode(tc, group, epochs, 1) assert isinstance(decoded, nap.TsdFrame) - assert isinstance(proba, np.ndarray) + assert isinstance(proba, nap.TsdTensor) np.testing.assert_array_almost_equal(features.values, decoded.values) assert len(decoded) == 100 assert len(proba) == 100 @@ -207,32 +199,32 @@ def test_decode_2d_with_dict(): def test_decode_2d_with_feature(): - features, group, tc, ep, xy = get_testing_set_2d() - decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) + features, group, tc, epochs = get_testing_set_2d() + decoded, proba = nap.decode(tc, group, epochs, 1) np.testing.assert_array_almost_equal(features.values, decoded.values) def 
test_decode_2d_with_time_units(): - features, group, tc, ep, xy = get_testing_set_2d() + features, group, tc, epochs = get_testing_set_2d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): - decoded, proba = nap.decode_2d(tc, group, ep, 1.0 * t, xy, time_units=tu) + decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) np.testing.assert_array_almost_equal(features.values, decoded.values) def test_decoded_2d_raise_errors(): - features, group, tc, ep, xy = get_testing_set_2d() + features, group, tc, epochs = get_testing_set_2d() with pytest.raises(Exception) as e_info: - nap.decode_2d(tc, np.random.rand(10), ep, 1, xy) + nap.decode(tc, np.random.rand(10), epochs, 1) assert str(e_info.value) == "Unknown format for group" - features, group, tc, ep, xy = get_testing_set_2d() - tc[5] = np.random.rand(2, 2) + features, group, tc, epochs = get_testing_set_2d() + tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) with pytest.raises(Exception) as e_info: - nap.decode_2d(tc, group, ep, 1, xy) + nap.decode(tc, group, epochs, 1) assert str(e_info.value) == "Different shapes for tuning_curves and group" - features, group, tc, ep, xy = get_testing_set_2d() - tc = {k: tc[i] for k, i in zip(np.arange(0, 40, 10), tc.keys())} + features, group, tc, epochs = get_testing_set_2d() + tc.coords["unit"] = [0, 2, 4, 6] with pytest.raises(Exception) as e_info: - nap.decode_2d(tc, group, ep, 1, xy) + nap.decode(tc, group, epochs, 1) assert str(e_info.value) == "Different indices for tuning curves and group keys" From d89ecefdcf24837a848c26c8edeae1c88fb86517 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 17:52:46 +0000 Subject: [PATCH 030/244] fix xarray issue --- pynapple/process/decoding.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 545d79910..de22f6056 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -313,7 +313,7 
@@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None if isinstance(group, (dict, nap.TsGroup)): numcells = len(group) - if tuning_curves.coords.sizes["unit"] != numcells: + if tuning_curves.sizes["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): @@ -325,7 +325,7 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None elif isinstance(group, nap.TsdFrame): numcells = group.shape[1] - if tuning_curves.coords.sizes["unit"] != numcells: + if tuning_curves.sizes["unit"] != numcells: raise RuntimeError("Different shapes for tuning_curves and group") if not np.all(tuning_curves.coords["unit"] == group.columns): From d2b14661a8508ff4aae1af420a3d24e61535a1cf Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 19:58:52 +0000 Subject: [PATCH 031/244] add feature_names argument + store occupancy and bin_edges --- doc/user_guide/06_tuning_curves.md | 112 ++++++++++++----------- main.py | 9 +- pynapple/process/tuning_curves.py | 45 ++++++++- tests/test_tuning_curves_general.py | 136 ++++++++++++++++++++++++++++ 4 files changed, 241 insertions(+), 61 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 177ca40a2..771c162ad 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -1,43 +1,42 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + language: python + name: python3 --- # Tuning curves -Pynapple can compute 1-dimensional tuning curves -(for example firing 
rate as a function of angular direction) -and 2-dimensional tuning curves (for example firing rate as a function -of position). It can also compute average firing rate for different -epochs (for example firing rate for different epochs of stimulus presentation). +Pynapple can compute n-dimensional tuning curves +(for example, firing rate as a function of 1D angular direction or firing rate as a function of 2D position). +It can also compute average firing rate for different epochs (for example firing rate for different epochs of stimulus presentation). -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] import pynapple as nap import numpy as np import matplotlib.pyplot as plt import seaborn as sns +import xarray as xr from pprint import pprint custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) +xr.set_options(display_expand_attrs=False) ``` -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] group = { 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))), 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))), 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))), } - tsgroup = nap.TsGroup(group) ``` @@ -48,20 +47,19 @@ tsgroup = nap.TsGroup(group) The epochs should be stored in a dictionnary: -```python +```{code-cell} ipython3 dict_ep = { "stim0": nap.IntervalSet(start=0, end=20), "stim1":nap.IntervalSet(start=30, end=70) - } +} ``` [`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet` type. The value is the mean firing rate of the neuron during this set of intervals. 
-```python +```{code-cell} ipython3 mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) - pprint(mean_fr) ``` @@ -69,7 +67,8 @@ pprint(mean_fr) ### 1-dimensional tuning curves -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -100,26 +99,28 @@ tsgroup = nap.TsGroup( Mandatory arguments are `TsGroup`, `Tsd` (or `TsdFrame` with 1 column only) and `bins` for number of bins of the tuning curves. -If an `IntervalSet` is passed with `epochs`, everything is restricted to `epochs` +If an `IntervalSet` is passed with `epochs`, everything is restricted to `epochs`, otherwise the time support of the feature is used. -The min and max of the tuning curve is by default the min and max of the feature. This can be tweaked with the argument `range`. +The min and max of the tuning curve is by default the min and max of the feature. +This can be tweaked with the `range` argument. The output is an `xarray.DataArray` with a unit and feature dimension. +The `feature_names` argument allows for setting feature names and units. -```python +```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( group=tsgroup, features=feature, bins=120, - range=(0, 2*np.pi) + range=(0, 2*np.pi), + feature_names=[("head direction", "rad")] ) tuning_curves_1d ``` -```python -tuning_curves_1d.plot.line(x="feature0", add_legend=False) -plt.xlabel("Feature space") +```{code-cell} ipython3 +tuning_curves_1d.plot.line(x="head direction", add_legend=False) plt.ylabel("Firing rate (Hz)") plt.show() ``` @@ -127,7 +128,8 @@ plt.show() Internally, the function is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. 
-```python tags=["hide-input"] +```{code-cell} ipython3 +:tags: [hide-input] plt.figure() plt.subplot(121) plt.plot(tsgroup[3].value_from(feature), 'o') @@ -136,7 +138,7 @@ plt.ylabel("Feature") plt.xlim(0, 2) plt.xlabel("Time (s)") plt.subplot(122) -plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["feature0"], label="Tuning curve (unit=3)") +plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["head direction"], label="Tuning curve (unit=3)") plt.xlabel("Firing rate (Hz)") plt.legend() plt.show() @@ -144,7 +146,8 @@ plt.show() ### 2-dimensional tuning curves -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] dt = 0.01 T = 10 epoch = nap.IntervalSet(start=0, end=T, time_units="s") @@ -168,12 +171,13 @@ The `features` argument must be a 2-columns `TsdFrame` object. `bins` can be an int or a tuple of 2 ints. `range` can be a list of two `(min, max)` tuples. -```python +```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( group=tsgroup, features=features, bins=(5,5), - range=[(-1, 1), (-1, 1)] + range=[(-1, 1), (-1, 1)], + feature_names=["x", "y"] ) tuning_curves_2d ``` @@ -183,15 +187,15 @@ Bins that have never been visited by the feature have been assigned a NaN value. Checking the accuracy of the tuning curves can be bone by displaying the spikes aligned to the features with the function `value_from` which assign to each spikes the corresponding features value for unit 0. -```python +```{code-cell} ipython3 ts_to_features = tsgroup[0].value_from(features) print(ts_to_features) ``` `tsgroup[0]` which is a `Ts` object has been transformed to a `TsdFrame` object with each timestamps (spike times) being associated with a features value. 
-```python tags=["hide-input"] - +```{code-cell} ipython3 +:tags: [hide-input] plt.figure() plt.subplot(121) plt.plot(features["b"], features["a"], label="features") @@ -226,7 +230,8 @@ Tuning curves computed in the following matter are usually made with data from c ### 1-dimensional tuning curves -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -256,26 +261,27 @@ tsdframe = nap.TsdFrame( The same function `nap.compute_tuning_curves` can also take a `TsdFrame` (for example continuous calcium data) as input. -```python +```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( group=tsdframe, features=feature, bins=120, - range=(0, 2*np.pi) + range=(0, 2*np.pi), + feature_names=[("head direction", "rad")] ) tuning_curves_1d ``` -```python -tuning_curves_1d.plot.line(x="feature0", add_legend=False) -plt.xlabel("Feature space") +```{code-cell} ipython3 +tuning_curves_1d.plot.line(x="head direction", add_legend=False) plt.ylabel("Firing rate (Hz)") plt.show() ``` ### 2-dimensional tuning curves -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] dt = 0.01 T = 10 epoch = nap.IntervalSet(start=0, end=T, time_units="s") @@ -296,16 +302,18 @@ tsdframe = nap.TsdFrame( ) ``` -```python +```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( group=tsdframe, features=features, - bins=5, + bins=5, + feature_names=["x", "y"] ) tuning_curves_2d ``` -```python +```{code-cell} ipython3 +:tags: [hide-input] plt.figure() plt.subplot(121) plt.plot(features["b"], features["a"], label="features") @@ -332,7 +340,3 @@ plt.colorbar() plt.tight_layout() plt.show() ``` - -```python - -``` diff --git a/main.py b/main.py index f64a2a332..9528a5f0a 100644 --- a/main.py +++ b/main.py @@ -17,7 +17,12 @@ # COMPUTING TUNING CURVES tuning_curves = nap.compute_tuning_curves( - spikes, head_direction, 120, epochs=wake_ep, range=(0, 2 * np.pi) + spikes, + 
head_direction, + 120, + epochs=wake_ep, + range=(0, 2 * np.pi), + feature_names=[("head direction", "rad")], ) # PLOT @@ -25,6 +30,4 @@ row="unit", col_wrap=5, subplot_kws={"projection": "polar"}, sharey=False ) plt.xticks([0, np.pi / 2, np.pi, 3 * np.pi / 2]) -g.set_titles("") -g.set_xlabels("") plt.show() diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index dbd12d5c2..f3e316e63 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -82,7 +82,9 @@ def wrapper(*args, **kwargs): return wrapper -def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs=None): +def compute_tuning_curves( + group, features, bins=10, range=None, epochs=None, fs=None, feature_names=None +): """ Computes n-dimensional tuning curves relative to n features. @@ -112,6 +114,12 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= fs : float, optional The exact sampling frequency of the features used to normalise the tuning curves. Unit should match that of the features. If not passed, it is estimated. + feature_names : sequence, optional + A sequence of names (and optionally units) for the features. + If not passed, the column names in `features` are used. + If those are not set, they are set to `feature0`, `feature1`, etc. + You can also pass a list of tuples [(name, unit), ...] to set both names + and units in the resulting xarray.DataArray. 
Returns ------- @@ -141,6 +149,29 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= elif not isinstance(features, nap.TsdFrame): raise TypeError("features should be a Tsd or TsdFrame.") + # check feature names + if feature_names is None: + _feature_names = features.columns + _feature_units = [None] * len(_feature_names) + else: + if not isinstance(feature_names, list): + raise TypeError("feature_names should be a sequence of strings or tuples.") + if len(feature_names) != features.shape[1]: + raise ValueError("feature_names should match the number of features.") + _feature_names = [] + _feature_units = [] + for feature in feature_names: + if isinstance(feature, str): + _feature_names.append(feature) + _feature_units.append(None) + elif isinstance(feature, tuple) and len(feature) == 2: + _feature_names.append(feature[0]) + _feature_units.append(feature[1]) + else: + raise TypeError( + "feature_names should be a sequence of strings or tuples of strings." + ) + # check epochs if epochs is None: epochs = features.time_support @@ -200,11 +231,17 @@ def compute_tuning_curves(group, features, bins=10, range=None, epochs=None, fs= coords={ "unit": keys, **{ - (f"feature{feature}" if isinstance(feature, int) else feature): e[:-1] - + np.diff(e) / 2 - for feature, e in zip(features.columns, bin_edges) + str(feature_name): ( + str(feature_name), + e[:-1] + np.diff(e) / 2, + {} if unit is None else {"units": unit}, + ) + for feature_name, unit, e in zip( + _feature_names, _feature_units, bin_edges + ) }, }, + attrs={"occupancy": occupancy, "bin_edges": bin_edges}, ) diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py index 99c7a9d16..040a989ac 100644 --- a/tests/test_tuning_curves_general.py +++ b/tests/test_tuning_curves_general.py @@ -166,6 +166,75 @@ def get_features(n, fs=10.0): {"fs": 1.0}, does_not_raise(), ), + # feature names + ( + get_group(1), + get_features(1), + {"feature_names": "feature0"}, + 
pytest.raises( + TypeError, + match="feature_names should be a sequence of strings or tuples.", + ), + ), + ( + get_group(1), + get_features(1), + {"feature_names": 0}, + pytest.raises( + TypeError, + match="feature_names should be a sequence of strings or tuples.", + ), + ), + ( + get_group(1), + get_features(1), + {"feature_names": ["feature0"]}, + does_not_raise(), + ), + ( + get_group(1), + get_features(1), + {"feature_names": ["feature0", "feature1"]}, + pytest.raises( + ValueError, match="feature_names should match the number of features." + ), + ), + ( + get_group(1), + get_features(1), + {"feature_names": [1]}, + pytest.raises( + TypeError, + match="feature_names should be a sequence of strings or tuples of strings.", + ), + ), + ( + get_group(1), + get_features(1), + {"feature_names": [(1,)]}, + pytest.raises( + TypeError, + match="feature_names should be a sequence of strings or tuples of strings.", + ), + ), + ( + get_group(1), + get_features(1), + {"feature_names": [("feature0", "x")]}, + does_not_raise(), + ), + ( + get_group(1), + get_features(2), + {"feature_names": [("feature0", "x"), "feature1"]}, + does_not_raise(), + ), + ( + get_group(1), + get_features(2), + {"feature_names": [("feature0", "unit0"), ("feature1", "unit1")]}, + does_not_raise(), + ), ], ) def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): @@ -443,6 +512,73 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, ), ), + # single unit, single feature, specified feature name + ( + get_group(1), + get_features(1), + {"feature_names": ["f0"]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified feature name and unit + ( + get_group(1), + get_features(1), + {"feature_names": [("f0", "unit0")]}, + xr.DataArray( + np.full((1, 
10), 10.0), + dims=["unit", "f0"], + coords={ + "unit": [1], + "f0": ( + "f0", + np.linspace(0, 9.9, 11)[:-1] + 0.495, + {"units": "unit0"}, + ), + }, + ), + ), + # single unit, multiple features, specified feature names + ( + get_group(1), + get_features(2), + {"feature_names": ["f0", "f1"]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "f0", "f1"], + coords={ + "unit": [1], + "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, multiple features, specified feature names and units + ( + get_group(1), + get_features(2), + {"feature_names": [("f0", "unit0"), ("f1", "unit1")]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "f0", "f1"], + coords={ + "unit": [1], + "f0": ( + "f0", + np.linspace(0, 9.9, 11)[:-1] + 0.495, + {"units": "unit0"}, + ), + "f1": ( + "f1", + np.linspace(0, 19.8, 11)[:-1] + 0.99, + {"units": "unit1"}, + ), + }, + ), + ), ], ) def test_compute_tuning_curves(group, features, kwargs, expected): From 8adef8071a0d3adbdce1a1c156d0b147bb732319 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 20:16:18 +0000 Subject: [PATCH 032/244] fixing notebooks --- doc/user_guide/03_metadata.md | 79 ++++++++++++++---------------- doc/user_guide/06_tuning_curves.md | 10 ++-- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/doc/user_guide/03_metadata.md b/doc/user_guide/03_metadata.md index f23e61564..cc1cbf83a 100644 --- a/doc/user_guide/03_metadata.md +++ b/doc/user_guide/03_metadata.md @@ -1,16 +1,14 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + 
language: python + name: python3 --- @@ -32,7 +30,8 @@ At initialization, metadata can be passed via a dictionary or pandas DataFrame u The length of the metadata must match the length of the object it describes (see class examples below for more detail). -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] import numpy as np import pandas as pd import pynapple as nap @@ -58,7 +57,7 @@ columns = ["a", "b", "c"] ### `TsGroup` Metadata added to `TsGroup` must match the number of `Ts`/`Tsd` objects, or the length of its `index` property. -```python +```{code-cell} ipython3 metadata = {"region": ["pfc", "pfc", "hpc", "hpc"]} tsgroup = nap.TsGroup(group, metadata=metadata) @@ -67,7 +66,7 @@ print(tsgroup) When initializing with a DataFrame, the index must align with the input dictionary keys (only when a dictionary is used to create the `TsGroup`). -```python +```{code-cell} ipython3 metadata = pd.DataFrame( index=group.keys(), data=["pfc", "pfc", "hpc", "hpc"], @@ -81,7 +80,7 @@ print(tsgroup) ### `IntervalSet` Metadata added to `IntervalSet` must match the number of intervals, or the length of its `index` property. -```python +```{code-cell} ipython3 metadata = { "reward": [1, 0, 1], "choice": ["left", "right", "left"], @@ -92,7 +91,7 @@ print(intervalset) Metadata can be initialized as a DataFrame using the metadata argument, or it can be inferred when initializing an `IntervalSet` with a DataFrame. -```python +```{code-cell} ipython3 df = pd.DataFrame( data=[[0, 30, 1, "left"], [35, 65, 0, "right"], [70, 100, 1, "left"]], columns=["start", "end", "reward", "choice"] @@ -105,7 +104,7 @@ print(intervalset) ### `TsdFrame` Metadata added to `TsdFrame` must match the number of data columns, or the length of its `columns` property. 
-```python +```{code-cell} ipython3 metadata = { "color": ["red", "blue", "green"], "position": [10,20,30], @@ -118,7 +117,7 @@ print(tsdframe) When initializing with a DataFrame, the DataFrame index must match the `TsdFrame` columns. -```python +```{code-cell} ipython3 metadata = pd.DataFrame( index=["a", "b", "c"], data=[["red", 10, "x"], ["blue", 20, "x"], ["green", 30, "y"]], @@ -139,7 +138,7 @@ The remaining metadata examples will be shown on a `TsGroup` object; however, al ### `set_info` Metadata can be passed as a dictionary or pandas DataFrame as the first positional argument, or metadata can be passed as name-value keyword arguments. -```python +```{code-cell} ipython3 tsgroup.set_info(unit_type=["multi", "single", "single", "single"]) print(tsgroup) ``` @@ -147,7 +146,7 @@ print(tsgroup) ### Using dictionary-like keys (square brackets) Most metadata names can set as a dictionary-like key (i.e. using square brackets). The only exceptions are for `IntervalSet`, where the names "start" and "end" are reserved for class properties. -```python +```{code-cell} ipython3 tsgroup["depth"] = [0, 1, 2, 3] print(tsgroup) ``` @@ -155,7 +154,7 @@ print(tsgroup) ### Using attribute assignment If the metadata name is unique from other class attributes and methods, and it is formatted properly (i.e. only alpha-numeric characters and underscores), it can be set as an attribute (i.e. using a `.` followed by the metadata name). -```python +```{code-cell} ipython3 tsgroup.label=["MUA", "good", "good", "good"] print(tsgroup) ``` @@ -163,7 +162,7 @@ print(tsgroup) ## Allowed data types As long as the length of the metadata container matches the length of the object (number of columns for `TsdFrame` and number of indices for `IntervalSet` and `TsGroup`), elements of the metadata can be any data type. 
-```python +```{code-cell} ipython3 tsgroup.coords = [[1,0],[0,1],[1,1],[2,1]] print(tsgroup) ``` @@ -171,19 +170,19 @@ print(tsgroup) ## Accessing metadata Metadata is stored as a pandas DataFrame, which can be previewed using the `metadata` attribute. -```python +```{code-cell} ipython3 print(tsgroup.metadata) ``` Single metadata columns (or lists of columns) can be retrieved using the [`get_info()`](pynapple.TsGroup.get_info) class method: -```python +```{code-cell} ipython3 print(tsgroup.get_info("region")) ``` Similarly, metadata can be accessed using key indexing (i.e. square brakets) -```python +```{code-cell} ipython3 print(tsgroup["region"]) ``` @@ -193,14 +192,14 @@ Metadata names must be strings. Key indexing with an integer will produce differ Finally, metadata that can be set as an attribute can also be accessed as an attribute. -```python +```{code-cell} ipython3 print(tsgroup.region) ``` ## Overwriting metadata User-set metadata is mutable and can be overwritten. -```python +```{code-cell} ipython3 print(tsgroup, "\n") tsgroup.set_info(label=["A", "B", "C", "D"]) print(tsgroup) @@ -209,7 +208,7 @@ print(tsgroup) ## Dropping metadata To drop metadata, use the [`drop_info()`](pynapple.TsGroup.drop_info) method. Multiple metadata columns can be dropped by passing a list of metadata names. -```python +```{code-cell} ipython3 print(tsgroup, "\n") tsgroup.drop_info("coords") print(tsgroup) @@ -218,41 +217,41 @@ print(tsgroup) ## Using metadata to slice objects Metadata can be used to slice or filter objects based on metadata values. -```python +```{code-cell} ipython3 print(tsgroup[tsgroup.label == "A"]) ``` ## `groupby`: Using metadata to group objects Similar to pandas, metadata can be used to group objects based on one or more metadata columns using the object method [`groupby`](pynapple.TsGroup.groupby), where the first argument is the metadata columns name(s) to group by. 
This function returns a dictionary with keys corresponding to unique groups and values corresponding to object indices belonging to each group. -```python +```{code-cell} ipython3 print(tsgroup,"\n") print(tsgroup.groupby("region")) ``` Grouping by multiple metadata columns should be passed as a list. -```python +```{code-cell} ipython3 tsgroup.groupby(["region","unit_type"]) ``` The optional argument `get_group` can be provided to return a new object corresponding to a specific group. -```python +```{code-cell} ipython3 tsgroup.groupby("region", get_group="hpc") ``` ## `groupby_apply`: Applying functions to object groups The `groupby_apply` object method allows a specific function to be applied to object groups. The first argument, same as `groupby`, is the metadata column(s) used to group the object. The second argument is the function to apply to each group. If only these two arguments are supplied, it is assumed that the grouped object is the first and only input to the applied function. This function returns a dictionary, where keys correspond to each unique group, and values correspond to the function output on each group. -```python +```{code-cell} ipython3 print(tsdframe,"\n") print(tsdframe.groupby_apply("label", np.mean)) ``` If the applied function requires additional inputs, these can be passed as additional keyword arguments into `groupby_apply`. -```python +```{code-cell} ipython3 feature = nap.Tsd(t=np.arange(100), d=np.repeat([0,1], 50)) tsgroup.groupby_apply( "region", @@ -263,14 +262,14 @@ tsgroup.groupby_apply( Alternatively, an anonymous function can be passed instead that defines additional arguments. -```python +```{code-cell} ipython3 func = lambda x: nap.compute_tuning_curves(x, features=feature, bins=2) tsgroup.groupby_apply("region", func) ``` An anonymous function can also be used to apply a function where the grouped object is not the first input. 
-```python +```{code-cell} ipython3 func = lambda x: nap.compute_tuning_curves( group=tsgroup, features=feature, @@ -281,7 +280,7 @@ intervalset.groupby_apply("choice", func) Alternatively, the optional parameter `input_key` can be passed to specify which keyword argument the grouped object corresponds to. Other required arguments of the applied function need to be passed as keyword arguments. -```python +```{code-cell} ipython3 intervalset.groupby_apply( "choice", nap.compute_tuning_curves, @@ -290,7 +289,3 @@ intervalset.groupby_apply( features=feature, bins=2) ``` - -```python - -``` diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 771c162ad..7903f906a 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -114,13 +114,13 @@ tuning_curves_1d = nap.compute_tuning_curves( features=feature, bins=120, range=(0, 2*np.pi), - feature_names=[("head direction", "rad")] + feature_names=["feature"] ) tuning_curves_1d ``` ```{code-cell} ipython3 -tuning_curves_1d.plot.line(x="head direction", add_legend=False) +tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.ylabel("Firing rate (Hz)") plt.show() ``` @@ -138,7 +138,7 @@ plt.ylabel("Feature") plt.xlim(0, 2) plt.xlabel("Time (s)") plt.subplot(122) -plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["head direction"], label="Tuning curve (unit=3)") +plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["feature"], label="Tuning curve (unit=3)") plt.xlabel("Firing rate (Hz)") plt.legend() plt.show() @@ -267,13 +267,13 @@ tuning_curves_1d = nap.compute_tuning_curves( features=feature, bins=120, range=(0, 2*np.pi), - feature_names=[("head direction", "rad")] + feature_names=["feature"] ) tuning_curves_1d ``` ```{code-cell} ipython3 -tuning_curves_1d.plot.line(x="head direction", add_legend=False) +tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.ylabel("Firing rate (Hz)") plt.show() ``` From 
ca461653f738a3b06a13071ff3f4bda32cca827c Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 20:34:51 +0000 Subject: [PATCH 033/244] fix decoding notebook --- doc/user_guide/07_decoding.md | 71 +++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 33 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index baab285cd..58bffd738 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -1,21 +1,20 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + language: python + name: python3 --- # Decoding -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] import pynapple as nap import numpy as np import pandas as pd @@ -26,7 +25,8 @@ sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_par ``` -Pynapple supports 1 dimensional and 2 dimensional bayesian decoding. The function returns the decoded feature as well as the probabilities for each timestamps. +Pynapple supports n-dimensional bayesian decoding. +The function returns the decoded feature as well as the probabilities for each timestamp. :::{hint} @@ -36,7 +36,8 @@ Input to the bayesian decoding functions always include the tuning curves comput ## 1-dimensional decoding -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] from scipy.ndimage import gaussian_filter1d # Fake Tuning curves @@ -67,26 +68,27 @@ epoch = nap.IntervalSet(0, 10) To decode, we need to compute tuning curves in 1D. 
-```python +```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( - tsgroup, feature, bins=61, range=(0, 2 * np.pi) + tsgroup, feature, bins=61, range=(0, 2 * np.pi), feature_names=["feature"] ) ``` We can display the tuning curves of each neurons -```python tags=["hide-input"] -tuning_curves_1d.plot.line(x="feature0", add_legend=False) -plt.xlabel("Feature position") -plt.ylabel("Rate (Hz)") +```{code-cell} ipython3 +:tags: [hide-input] +tuning_curves_1d.name = "Firing rate" +tuning_curves_1d.attrs["unit"] = "Hz" +tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.show() ``` `nap.decode_1d` performs bayesian decoding: -```python +```{code-cell} ipython3 decoded, proba_feature = nap.decode_1d( - tuning_curves=tcurves_1d.to_pandas().T, # 1D tuning curves + tuning_curves=tuning_curves_1d.to_pandas().T, # 1D tuning curves group=tsgroup, # Spiking activity ep=epoch, # Small epoch bin_size=0.06, # How to bin the spike trains @@ -96,7 +98,8 @@ decoded, proba_feature = nap.decode_1d( `decoded` is `Tsd` object containing the decoded feature value. `proba_feature` is a `TsdFrame` containing the probabilities of being in a particular feature bin over time. -```python tags=["hide-input"] +```{code-cell} ipython3 +:tags: [hide-input] plt.figure(figsize=(12, 6)) plt.subplot(211) plt.plot(feature.restrict(epoch), label="True") @@ -112,7 +115,8 @@ plt.show() ## 2-dimensional decoding -```python tags=["hide-cell"] +```{code-cell} ipython3 +:tags: [hide-cell] dt = 0.1 epochs = nap.IntervalSet(start=0, end=1000, time_units="s") features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T @@ -138,7 +142,7 @@ ts_group = nap.TsGroup(ts_group, time_support=epoch) To decode, we need to compute tuning curves in 2D. 
-```python +```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( group=ts_group, # Spiking activity of 12 neurons features=features, # 2-dimensional features @@ -150,13 +154,17 @@ tuning_curves_2d = nap.compute_tuning_curves( We can display the tuning curves of each neuron -```python +```{code-cell} ipython3 +:tags: [hide-input] +tuning_curves_2d.name = "Firing rate" +tuning_curves_2d.attrs["unit"] = "Hz" tuning_curves_2d.plot(row="unit", col_wrap=6) +plt.show() ``` `nap.decode_2d` performs bayesian decoding: -```python +```{code-cell} ipython3 tcs = {c: tuning_curves_2d.sel(unit=c).values for c in tuning_curves_2d.coords["unit"].values} bins = [tuning_curves_2d.coords[dim].values for dim in tuning_curves_2d.coords if dim != "unit"] @@ -170,7 +178,8 @@ decoded, proba_feature = nap.decode_2d( ) ``` -```python tags=["hide-input"] +```{code-cell} ipython3 +:tags: [hide-input] plt.figure(figsize=(15, 5)) plt.subplot(131) plt.plot(features["a"].get(0,20), label="True") @@ -201,7 +210,3 @@ plt.legend() plt.tight_layout() plt.show() ``` - -```python - -``` From 5618514f09998fc4e54576355694dce9c7cb0d29 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 21:03:41 +0000 Subject: [PATCH 034/244] hd tutorial --- doc/examples/tutorial_HD_dataset.md | 135 +++++++++++++++------------- 1 file changed, 74 insertions(+), 61 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 184692d46..c415b2f23 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -1,16 +1,14 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + 
language: python + name: python3 --- Analysing head-direction cells @@ -20,7 +18,8 @@ This tutorial demonstrates how we use Pynapple to generate Figure 4a in the [pub The NWB file for the example is hosted on [OSF](https://osf.io/jb2gd). We show below how to stream it. The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000056). -```python +```{code-cell} ipython3 +:tags: [hide-output] import scipy import pandas as pd import numpy as np @@ -32,6 +31,7 @@ import xarray as xr custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) +xr.set_options(display_expand_attrs=False) ``` *** @@ -40,7 +40,7 @@ Downloading the data It's a small NWB file. -```python +```{code-cell} ipython3 path = "Mouse32-140822.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/jb2gd/download", stream=True) @@ -54,15 +54,15 @@ if path not in os.listdir("."): Parsing the data ------------------ -The first step is to load the data and other relevant variables of interest +The first step is to load the data and other relevant variables of interest. -```python +```{code-cell} ipython3 data = nap.load_file(path) # Load the NWB file for this dataset ``` What does this look like? -```python +```{code-cell} ipython3 print(data) ``` @@ -70,9 +70,10 @@ print(data) Head-Direction Tuning Curves ------------------ -To plot Head-Direction Tuning curves, we need the spike timings and the orientation of the animal. These quantities are stored in the variables 'units' and 'ry'. +To plot head-direction tuning curves, we need the spike timings and the orientation of the animal. +These quantities are stored in the variables 'units' and 'ry'. 
-```python +```{code-cell} ipython3 spikes = data["units"] # Get spike timings epochs = data["epochs"] # Get the behavioural epochs (in this case, sleep and wakefulness) angle = data["ry"] # Get the tracked orientation of the animal @@ -80,56 +81,54 @@ angle = data["ry"] # Get the tracked orientation of the animal What does this look like? -```python +```{code-cell} ipython3 print(spikes) ``` Here, rate is the mean firing rate of the unit. Location indicates the brain region the unit was recorded from, and group refers to the shank number on which the cell was located. -This dataset contains units recorded from the anterior thalamus. Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). Units were also recorded from nearby thalamic nuclei in this animal. For the purposes of our tutorial, we are interested in the units recorded in ADn. We can restrict ourselves to analysis of these units rather easily, using Pynapple. +This dataset contains units recorded from the anterior thalamus. Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). +Units were also recorded from nearby thalamic nuclei in this animal. +For the purposes of our tutorial, we are interested in the units recorded in ADn. +We can restrict ourselves to analysis of these units rather easily, using Pynapple. -```python +```{code-cell} ipython3 spikes_adn = spikes.getby_category("location")["adn"] # Select only those units that are in ADn -``` - -What does this look like? - -```python print(spikes_adn) ``` -Let's compute some head-direction tuning curves. To do this in Pynapple, all you need is a single line of code! +Let's compute some head-direction tuning curves. +To do this in Pynapple, all you need is a single line of code! -Plot firing rate of ADn units as a function of heading direction, i.e. 
a head-direction tuning curve +Let's plot firing rate of ADn units as a function of heading direction, i.e. a head-direction tuning curve: -```python +```{code-cell} ipython3 tuning_curves = nap.compute_tuning_curves( group=spikes_adn, features=angle, bins=61, epochs=epochs[epochs.tags == "wake"], - range=(0, 2 * np.pi) + range=(0, 2 * np.pi), + feature_names=["head_direction"] ) -``` - -What does this look like? - -```python tuning_curves ``` -It is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. +The output is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. Let's compute the preferred angle quickly as follows: -```python -pref_ang = tuning_curves.idxmax(dim="feature0") +```{code-cell} ipython3 +pref_ang = tuning_curves.idxmax(dim="head_direction") ``` -For easier visualization, we will colour our plots according to the preferred angle of the cell. To do so, we will normalize the range of angles we have, over a colourmap. +For easier visualization, we will color our plots according to the preferred angle of the cell. +To do so, we will normalize the range of angles we have, over a colormap. 
-```python -norm = plt.Normalize() # Normalizes data into the range [0,1] -color = plt.cm.hsv(norm([i / (2 * np.pi) for i in pref_ang])) # Assigns a colour in the HSV colourmap for each value of preferred angle +```{code-cell} ipython3 +# Normalizes data into the range [0,1] +norm = plt.Normalize() +# Assigns a color in the HSV colormap for each value of preferred angle +color = plt.cm.hsv(norm([i / (2 * np.pi) for i in pref_ang.values])) color = xr.DataArray( color, dims=("unit", "color"), @@ -139,23 +138,32 @@ color = xr.DataArray( To make the tuning curves look nice, we will smooth them before plotting: -```python +```{code-cell} ipython3 from scipy.ndimage import gaussian_filter1d -tmp = np.concatenate((tuning_curves.values, tuning_curves.values, tuning_curves.values), axis=1) +tmp = np.concatenate( + [ + tuning_curves.values, + tuning_curves.values, + tuning_curves.values + ], + axis=1) tmp = gaussian_filter1d(tmp, sigma=3, axis=1) tuning_curves.values = tmp[:, tuning_curves.shape[1]:2*tuning_curves.shape[1]] ``` -What does this look like? Let's plot the tuning curves! +What does this look like? Let's plot them! -```python +```{code-cell} ipython3 sorted_tuning_curves = tuning_curves.sortby(pref_ang) plt.figure(figsize=(12, 9)) -for i, n in enumerate(tuning_curves.unit.values): - plt.subplot(8, 4, i + 1, projection='polar') # Plot the curves in 8 rows and 4 columns +for i, n in enumerate(sorted_tuning_curves.coords["unit"]): + # Plot the curves in 8 rows and 4 columns + plt.subplot(8, 4, i + 1, projection='polar') plt.plot( - sorted_tuning_curves.coords["feature0"], sorted_tuning_curves.sel(unit=n).values, color=color.sel(unit=n).values + sorted_tuning_curves.coords["head_direction"], + sorted_tuning_curves.sel(unit=n).values, + color=color.sel(unit=n).values ) # Colour of the curves determined by preferred angle plt.xticks([]) plt.show() @@ -168,16 +176,18 @@ Awesome! Decoding ------------------ -Now that we have HD tuning curves, we can go one step further. 
Using only the population activity of ADn units, we can decode the direction the animal is looking in. We will then compare this to the real head direction of the animal, and discover that population activity in the ADn indeed codes for HD. +Now that we have HD tuning curves, we can go one step further. Using only the population activity of ADn units, we can decode the direction the animal is looking in. +We will then compare this to the real head-direction of the animal, and discover that population activity in the ADn indeed codes for HD. -To decode the population activity, we will be using a Bayesian Decoder as implemented in Pynapple. Just a single line of code! +To decode the population activity, we will be using a bayesian decoder as implemented in Pynapple. +Again, just a single line of code! -```python +```{code-cell} ipython3 print(tuning_curves.to_pandas().T) print(spikes_adn) ``` -```python +```{code-cell} ipython3 decoded, proba_feature = nap.decode_1d( tuning_curves=tuning_curves.to_pandas().T, group=spikes_adn, @@ -187,15 +197,15 @@ decoded, proba_feature = nap.decode_1d( ) ``` -What does this look like ? +What does this look like? -```python +```{code-cell} ipython3 print(decoded) ``` -The variable 'decoded' indicates the most probable angle in which the animal was looking. There is another variable, 'proba_feature' that denotes the probability of a given angular bin at a given time point. We can look at it below: +The variable 'decoded' contains the most probable angle, and 'proba_feature' that contains the probability of a given angular bin at a given time point: -```python +```{code-cell} ipython3 print(proba_feature) ``` @@ -203,7 +213,7 @@ Each row is a time bin, and each column is an angular bin. The sum of all values Now, let's plot the raster plot for a given period of time, and overlay the actual and decoded HD on the population activity. 
-```python +```{code-cell} ipython3 ep = nap.IntervalSet( start=10717, end=10730 ) # Select an arbitrary interval for plotting @@ -220,6 +230,7 @@ plt.plot( plt.legend(loc="upper left") plt.xlabel("Time (s)") plt.ylabel("Neurons") +plt.show() ``` From this plot, we can see that the decoder is able to estimate the head-direction based on the population activity in ADn. Amazing! @@ -227,7 +238,7 @@ From this plot, we can see that the decoder is able to estimate the head-directi What does the probability distribution in this example event look like? Ideally, the bins with the highest probability will correspond to the bins having the most spikes. Let's plot the probability matrix to visualize this. -```python +```{code-cell} ipython3 smoothed = scipy.ndimage.gaussian_filter( proba_feature, 1 ) # Smoothening the probability distribution @@ -261,10 +272,12 @@ plt.imshow( plt.xlabel("Time (s)") # X-axis is time in seconds plt.ylabel("Angle (rad)") # Y-axis is the angle in radian plt.colorbar(label="probability") +plt.show() ``` -From this probability distribution, we observe that the decoded HD very closely matches the actual HD. Therefore, the population activity in ADn is a reliable estimate of the heading direction of the animal. +From this probability distribution, we observe that the decoded HD closely matches the actual HD. +Hence, the population activity in ADn is a reliable estimate of the heading direction of the animal. I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team! 
From c08cc2394636617b63a55cffd29565a502d1efa6 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 21:13:08 +0000 Subject: [PATCH 035/244] dandi example --- doc/examples/tutorial_pynapple_dandi.md | 56 +++++++++++-------------- pynapple/process/tuning_curves.py | 8 ++-- 2 files changed, 28 insertions(+), 36 deletions(-) diff --git a/doc/examples/tutorial_pynapple_dandi.md b/doc/examples/tutorial_pynapple_dandi.md index ae7510baa..1dbace0c0 100644 --- a/doc/examples/tutorial_pynapple_dandi.md +++ b/doc/examples/tutorial_pynapple_dandi.md @@ -1,16 +1,14 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + language: python + name: python3 --- Streaming data from DANDI @@ -31,7 +29,7 @@ DANDI ----- DANDI allows you to stream data without downloading all the files. In this case the data extracted from the NWB file are stored in the nwb-cache folder. -```python +```{code-cell} ipython3 from pynwb import NWBHDF5IO from dandi.dandiapi import DandiAPIClient @@ -63,15 +61,14 @@ fs = CachingFileSystem( # next, open the file file = h5py.File(fs.open(s3_url, "rb")) io = NWBHDF5IO(file=file, load_namespaces=True) - -print(io) +io ``` Pynapple -------- If opening the NWB works, you can start streaming data straight into pynapple with the `NWBFile` class. 
-```python +```{code-cell} ipython3 import pynapple as nap import matplotlib.pyplot as plt import seaborn as sns @@ -81,42 +78,41 @@ custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) nwb = nap.NWBFile(io.read()) - -print(nwb) +nwb ``` We can load the spikes as a TsGroup for inspection. -```python +```{code-cell} ipython3 units = nwb["units"] - -print(units) +units ``` -As well as the position +As well as the position: -```python +```{code-cell} ipython3 position = nwb["SpatialSeriesLED1"] ``` -Here we compute the 2d tuning curves +Here, we compute the 2d tuning curves: -```python +```{code-cell} ipython3 tuning_curves = nap.compute_tuning_curves(units, position, 20) ``` -Let's plot the tuning curves +Let's plot the tuning curves: -```python +```{code-cell} ipython3 tuning_curves.name="Firing Rate" tuning_curves.attrs["units"] = "Hz" tuning_curves.plot(row="unit", col_wrap=4, figsize=(15, 7)) +plt.show() ``` Let's plot the spikes of unit 1, which has a nice grid. Here, I use the [`value_from`](pynapple.Ts.value_from) function to assign to each spike the closest position in time. -```python +```{code-cell} ipython3 plt.figure(figsize=(15, 6)) plt.subplot(121) extent = ( @@ -138,7 +134,3 @@ plt.ylabel("y") plt.tight_layout() plt.show() ``` - -```python - -``` diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index f3e316e63..d66982824 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -90,9 +90,9 @@ def compute_tuning_curves( Parameters ---------- - group : TsGroup, TsdFrame or dict of Ts/Tsd object. - The group of Ts/Tsd for which the tuning curves will be computed - features : Tsd/TsdFrame + group : TsGroup, TsdFrame or dict of Ts or Tsd objects. + The group of Ts or Tsd for which the tuning curves will be computed + features : Tsd, TsdFrame The features (i.e. one column per feature). 
bins : sequence or int The bin specification: @@ -123,7 +123,7 @@ def compute_tuning_curves( Returns ------- - xarray + xarray.DataArray An xarray DataArray containing the tuning curves with labeled dimensions. """ From fcc3e6a6bb9eaf1ff212aa08a6d0ea633ece3286 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 21:21:23 +0000 Subject: [PATCH 036/244] calcium tutorial --- doc/examples/tutorial_calcium_imaging.md | 98 ++++++++++++------------ 1 file changed, 51 insertions(+), 47 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 014e55d05..20b85be76 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -1,16 +1,14 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 --- Calcium Imaging @@ -18,27 +16,31 @@ Calcium Imaging Working with calcium data. -For the example dataset, we will be working with a recording of a freely-moving mouse imaged with a Miniscope (1-photon imaging). The area recorded for this experiment is the postsubiculum - a region that is known to contain head-direction cells, or cells that fire when the animal's head is pointing in a specific direction. +As example dataset, we will be working with a recording of a freely-moving mouse imaged with a Miniscope (1-photon imaging). +The area recorded for this experiment is the postsubiculum - a region that is known to contain head-direction cells, or cells that fire when the animal's head is pointing in a specific direction. 
The NWB file for the example is hosted on [OSF](https://osf.io/sbnaw). We show below how to stream it. -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 +:tags: [hide-output] import pynapple as nap import matplotlib.pyplot as plt import seaborn as sns import os import requests +import xarray as xr custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) +xr.set_options(display_expand_attrs=False) ``` *** Downloading the data ------------------ -First things first: Let's find our file +First things first: let's find our file. -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 path = "A0670-221213.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/sbnaw/download", stream=True) @@ -51,18 +53,18 @@ if path not in os.listdir("."): *** Parsing the data ------------------ -Now that we have the file, let's load the data +Now that we have the file, let's load the data: -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 data = nap.load_file(path, lazy_loading=False) -print(data) +data ``` -Let's save the RoiResponseSeries as a variable called 'transients' and print it +Let's save the RoiResponseSeries as a variable called 'transients' and print it: -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 transients = data['RoiResponseSeries'] -print(transients) +transients ``` *** @@ -70,7 +72,7 @@ Plotting the activity of one neuron ----------------------------------- Our transients are saved as a (35757, 65) TsdFrame. Looking at the printed object, you can see that we have 35757 data points for each of our 65 regions of interest (ROIs). We want to see which of these are head-direction cells, so we need to plot a tuning curve of fluorescence vs head-direction of the animal. 
-```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 plt.figure(figsize=(6, 2)) plt.plot(transients[0:2000,0], linewidth=5) plt.xlabel("Time (s)") @@ -78,55 +80,56 @@ plt.ylabel("Fluorescence") plt.show() ``` -Here we extract the head-direction as a variable called angle. +Here, we extract the head-direction as a variable called angle. -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 angle = data['ry'] -print(angle) +angle ``` As you can see, we have a longer recording for our tracking of the animal's head than we do for our calcium imaging - something to keep in mind. -```python jupyter={"outputs_hidden": false} -print(transients.time_support) -print(angle.time_support) +```{code-cell} ipython3 +transients.time_support +angle.time_support ``` *** Calcium tuning curves --------------------- -Here we compute the tuning curves of all the ROIs. +Here, we compute the tuning curves of all the ROIs. -```python jupyter={"outputs_hidden": false} -tcurves = nap.compute_tuning_curves(transients, angle, bins = 120) -tcurves +```{code-cell} ipython3 +tuning_curves = nap.compute_tuning_curves(transients, angle, bins=120) +tuning_curves ``` This yields an `xarray.DataArray`, which we can beautify by setting feature names and units: -```python -def set_metadata(tcurves): - _tcurves=tcurves.rename({"feature0": "Angle", "unit": "ROI"}) - _tcurves.name="Fluorescence" - _tcurves.attrs["units"]="a.u." +```{code-cell} ipython3 +def set_metadata(tuning_curves): + _tuning_curves=tuning_curves.rename({"0": "Angle", "unit": "ROI"}) + _tuning_curves.name="Fluorescence" + _tuning_curves.attrs["units"]="a.u."
+ _tuning_curves.coords["Angle"].attrs["units"]="rad" + return _tuning_curves -annotated_tcurves = set_metadata(tcurves) -annotated_tcurves +annotated_tuning_curves = set_metadata(tuning_curves) +annotated_tuning_curves ``` Having set some metadata, we can easily plot one ROI: -```python -annotated_tcurves[4].plot() +```{code-cell} ipython3 +annotated_tuning_curves[4].plot() +plt.show() ``` It looks like this could be a head-direction cell. One important property of head-direction cells, however, is that their firing with respect to head-direction is stable. To check for their stability, we can split our recording in two and compute a tuning curve for each half of the recording. We start by finding the midpoint of the recording, using the function [`get_intervals_center`](pynapple.IntervalSet.get_intervals_center). Using this, we then create one new IntervalSet with two rows, one for each half of the recording. -```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 center = transients.time_support.get_intervals_center() halves = nap.IntervalSet( @@ -135,9 +138,9 @@ halves = nap.IntervalSet( ) ``` -Now we can compute the tuning curves for each half of the recording and plot the tuning curves again. +Now, we can compute the tuning curves for each half of the recording and plot the tuning curves again.
-```python jupyter={"outputs_hidden": false} +```{code-cell} ipython3 half1 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[0]]) half2 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[1]]) @@ -146,6 +149,7 @@ set_metadata(half1[4]).plot(ax=ax1) ax1.set_title("First half") set_metadata(half2[4]).plot(ax=ax2) ax2.set_title("Second half") +plt.show() ``` :::{card} From 2a0f1661dac3045d16c806c034aaf8584a5b1ae0 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 21:33:05 +0000 Subject: [PATCH 037/244] phase tutorial --- doc/examples/tutorial_phase_preferences.md | 71 +++++++++++----------- pynapple/process/tuning_curves.py | 4 +- 2 files changed, 39 insertions(+), 36 deletions(-) diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index 31b2418fa..d6ab7deb0 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -1,16 +1,14 @@ --- -jupyter: - jupytext: - default_lexer: ipython3 - text_representation: - extension: .md - format_name: markdown - format_version: '1.3' - jupytext_version: 1.17.2 - kernelspec: - display_name: pynapple - language: python - name: python3 +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.4 +kernelspec: + display_name: Python 3 + language: python + name: python3 --- Spikes-phase coupling @@ -21,7 +19,7 @@ with spiking data, to find phase preferences of spiking units. Specifically, we will examine LFP and spiking data from a period of REM sleep, after traversal of a linear track. 
-```python +```{code-cell} ipython3 import math import os @@ -43,7 +41,7 @@ Downloading the data ------------------ Let's download the data and save it locally -```python +```{code-cell} ipython3 path = "Achilles_10252013_EEG.nwb" if path not in os.listdir("."): r = requests.get(f"https://osf.io/2dfvp/download", stream=True) @@ -58,10 +56,10 @@ Loading the data ------------------ Let's load and print the full dataset. -```python +```{code-cell} ipython3 data = nap.load_file(path) FS = 1250 # We know from the methods of the paper -print(data) +data ``` *** @@ -69,7 +67,7 @@ Selecting slices ----------------------------------- For later visualization, we define an interval of 3 seconds of data during REM sleep. -```python +```{code-cell} ipython3 ep_ex_rem = nap.IntervalSet( data["rem"]["start"][0] + 97.0, data["rem"]["start"][0] + 100.0, @@ -78,7 +76,7 @@ ep_ex_rem = nap.IntervalSet( Here we restrict the lfp to the REM epochs. -```python +```{code-cell} ipython3 tsd_rem = data["eeg"][:,0].restrict(data["rem"]) # We will also extract spike times from all units in our dataset @@ -91,12 +89,13 @@ Plotting the LFP Activity ----------------------------------- We should first plot our REM Local Field Potential data. -```python +```{code-cell} ipython3 fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 3)) ax.plot(tsd_rem.restrict(ep_ex_rem)) ax.set_title("REM Local Field Potential") ax.set_ylabel("LFP (a.u.)") ax.set_xlabel("time (s)") +plt.show() ``` *** @@ -109,20 +108,20 @@ frequencies present in the data. We must define the frequency set that we'd like to use for our decomposition. -```python +```{code-cell} ipython3 freqs = np.geomspace(5, 200, 25) ``` We compute the wavelet transform on our LFP data (only during the example interval). -```python +```{code-cell} ipython3 cwt_rem = nap.compute_wavelet_transform(tsd_rem.restrict(ep_ex_rem), fs=FS, freqs=freqs) ``` *** Now let's plot the calculated wavelet scalogram. 
-```python +```{code-cell} ipython3 # Define wavelet decomposition plotting function def plot_timefrequency(freqs, powers, ax=None): im = ax.imshow(np.abs(powers), aspect="auto") @@ -147,6 +146,7 @@ ax1.plot(tsd_rem.restrict(ep_ex_rem)) ax1.set_ylabel("LFP (a.u.)") ax1.set_xlabel("Time (s)") ax1.margins(0) +plt.show() ``` *** @@ -155,13 +155,13 @@ Filtering Theta As expected, there is a strong 8Hz component during REM sleep. We can filter it using the function [`nap.apply_bandpass_filter`](pynapple.process.filtering.apply_bandpass_filter). -```python +```{code-cell} ipython3 theta_band = nap.apply_bandpass_filter(tsd_rem, cutoff=(6.0, 10.0), fs=FS) ``` We can plot the original signal and the filtered signal. -```python +```{code-cell} ipython3 plt.figure(constrained_layout=True, figsize=(12, 3)) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) plt.plot(theta_band.restrict(ep_ex_rem)) @@ -175,7 +175,7 @@ Computing phase From the filtered signal, it is easy to get the phase using the Hilbert transform. Here we use scipy Hilbert method. -```python +```{code-cell} ipython3 from scipy import signal theta_phase = nap.Tsd(t=theta_band.t, d=np.angle(signal.hilbert(theta_band))) @@ -183,7 +183,7 @@ theta_phase = nap.Tsd(t=theta_band.t, d=np.angle(signal.hilbert(theta_band))) Let's plot the phase. -```python +```{code-cell} ipython3 plt.figure(constrained_layout=True, figsize=(12, 3)) plt.subplot(211) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) @@ -203,39 +203,42 @@ of each of the units using the [`compute_tuning_curves`](pynapple.process.tuning We will start by throwing away cells which do not have a high enough firing rate during our interval. -```python +```{code-cell} ipython3 spikes = spikes[spikes.rate > 5.0] ``` The feature is the theta phase during REM sleep. 
-```python +```{code-cell} ipython3 phase_modulation = nap.compute_tuning_curves( - group=spikes, features=theta_phase, bins=61, range=(-np.pi, np.pi) + group=spikes, + features=theta_phase, + bins=61, + range=(-np.pi, np.pi), + feature_names=[("Phase", "rad")] ) ``` Let's plot the first 3 neurons. -```python -phase_modulation=phase_modulation.rename({"feature0": "Phase"}) +```{code-cell} ipython3 phase_modulation.name="Firing Rate" phase_modulation.attrs["units"]="Hz" -phase_modulation.coords["Phase"].attrs["units"]="rad" phase_modulation[:3].plot(row="unit", col_wrap=3, sharey=False) +plt.show() ``` There is clearly a strong modulation for the third neuron. Finally, we can use the function [`value_from`](pynapple.Ts.value_from) to align each spikes to the corresponding phase position and overlay it with the LFP. -```python +```{code-cell} ipython3 spike_phase = spikes[spikes.index[3]].value_from(theta_phase) ``` Let's plot it. -```python +```{code-cell} ipython3 plt.figure(constrained_layout=True, figsize=(12, 3)) plt.subplot(211) plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index d66982824..713c8ef40 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -90,7 +90,7 @@ def compute_tuning_curves( Parameters ---------- - group : TsGroup, TsdFrame or dict of Ts or Tsd objects. + group : TsGroup, TsdFrame or dict of Ts, Tsd objects. The group of Ts or Tsd for which the tuning curves will be computed features : Tsd, TsdFrame The features (i.e. one column per feature). @@ -124,7 +124,7 @@ def compute_tuning_curves( Returns ------- xarray.DataArray - An xarray DataArray containing the tuning curves with labeled dimensions. + An xarray.DataArray containing the tuning curves with labeled dimensions. 
""" # check group From dcab7838761809208ee0e146582fc1e943ac6e6d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 10 Jul 2025 21:39:36 +0000 Subject: [PATCH 038/244] use occupancy from xarrat --- pynapple/process/decoding.py | 43 +++++++++--------------------------- tests/test_decoding.py | 29 +----------------------- 2 files changed, 12 insertions(+), 60 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index de22f6056..48a33b72b 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -3,6 +3,7 @@ """ import numpy as np +import xarray as xr from .. import core as nap @@ -264,7 +265,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N return decoded, p -def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None): +def decode(tuning_curves, group, epochs, bin_size, time_units="s"): """ Performs Bayesian decoding over n-dimensional features. @@ -288,10 +289,6 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None Bin size. Default is second. Use the parameter time_units to change it. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). - features : TsdFrame - The features used to compute the tuning curves. - Used to correct for occupancy. - If feature is not passed, the occupancy is uniform. 
Returns ------- @@ -309,6 +306,12 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None """ + # check tuning curves + if not isinstance(tuning_curves, (xr.DataArray)): + raise TypeError( + "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves" + ) + # check group if isinstance(group, (dict, nap.TsGroup)): numcells = len(group) @@ -335,34 +338,10 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", features=None else: raise RuntimeError("Unknown format for group") - # occupancy - if features is None: - occupancy = np.ones_like(tuning_curves[0]).flatten() - elif isinstance(features, (nap.TsdFrame, nap.Tsd)): - if isinstance(features, nap.Tsd): - features = nap.TsdFrame( - t=features.times(), d=features.values, time_support=epochs - ) - if tuning_curves.ndim - 1 != features.shape[1]: - raise RuntimeError("Number of features and tuning_curves do not match.") - - bins = [] - for dim in tuning_curves.dims[1:]: - centers = tuning_curves.coords[dim].values - diffs = np.diff(centers) - edges = centers[:-1] - diffs / 2 - bins.append( - np.concatenate( - ( - edges, - [edges[-1] + diffs[-1], edges[-1] + 2 * diffs[-1]], - ) - ) - ) - occupancy, _ = np.histogramdd(features, bins) - occupancy = occupancy.flatten() + if "occupancy" in tuning_curves.dims: + occupancy = tuning_curves.coords["occupancy"].values.flatten() else: - raise RuntimeError("Features should be a TsdFrame or Tsd.") + occupancy = np.ones_like(tuning_curves[0]).flatten() # Transforming to pure numpy array tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 8b2ab07b0..a323785a4 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -53,24 +53,10 @@ def test_decode_1d_with_TsdFrame(): np.testing.assert_array_almost_equal(proba.values, tmp) -def test_decode_1d_with_feature(): - feature, group, tc, epochs = get_testing_set_1d() - decoded, proba = 
nap.decode(tc, group, epochs, bin_size=1, features=feature) - assert isinstance(decoded, nap.Tsd) - assert isinstance(proba, nap.TsdFrame) - np.testing.assert_array_almost_equal(feature.values, decoded.values) - assert len(decoded) == 100 - assert len(proba) == 100 - tmp = np.ones((100, 2)) - tmp[50:, 0] = 0.0 - tmp[0:50, 1] = 0.0 - np.testing.assert_array_almost_equal(proba.values, tmp) - - def test_decode_1d_with_dict(): feature, group, tc, epochs = get_testing_set_1d() group = dict(group) - decoded, proba = nap.decode(tc, group, epochs, bin_size=1, features=feature) + decoded, proba = nap.decode(tc, group, epochs, bin_size=1) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -82,13 +68,6 @@ def test_decode_1d_with_dict(): np.testing.assert_array_almost_equal(proba.values, tmp) -def test_decode_1d_with_wrong_feature(): - feature, group, tc, epochs = get_testing_set_1d() - with pytest.raises(RuntimeError) as e_info: - nap.decode(tc, group, epochs, bin_size=1, features=[1, 2, 3]) - assert str(e_info.value) == "Features should be a TsdFrame or Tsd." 
- - def test_decode_1d_with_time_units(): feature, group, tc, epochs = get_testing_set_1d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): @@ -198,12 +177,6 @@ def test_decode_2d_with_dict(): np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) -def test_decode_2d_with_feature(): - features, group, tc, epochs = get_testing_set_2d() - decoded, proba = nap.decode(tc, group, epochs, 1) - np.testing.assert_array_almost_equal(features.values, decoded.values) - - def test_decode_2d_with_time_units(): features, group, tc, epochs = get_testing_set_2d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): From d2110afb79ba2ea1611d7d0ff48ea8be42c059e3 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 18:31:36 +0000 Subject: [PATCH 039/244] merge tests + update input arguments --- pynapple/process/tuning_curves.py | 83 ++-- tests/test_tuning_curves.py | 573 +++++++++++++++++++++++++++ tests/test_tuning_curves_general.py | 587 ---------------------------- 3 files changed, 614 insertions(+), 629 deletions(-) delete mode 100644 tests/test_tuning_curves_general.py diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 713c8ef40..c78b2a3ee 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -83,7 +83,14 @@ def wrapper(*args, **kwargs): def compute_tuning_curves( - group, features, bins=10, range=None, epochs=None, fs=None, feature_names=None + group, + features, + bins=10, + range=None, + epochs=None, + fs=None, + feature_names=None, + return_pandas=False, ): """ Computes n-dimensional tuning curves relative to n features. @@ -96,7 +103,6 @@ def compute_tuning_curves( The features (i.e. one column per feature). bins : sequence or int The bin specification: - * A sequence of arrays describing the monotonically increasing bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... 
=bins) @@ -114,17 +120,21 @@ def compute_tuning_curves( fs : float, optional The exact sampling frequency of the features used to normalise the tuning curves. Unit should match that of the features. If not passed, it is estimated. - feature_names : sequence, optional - A sequence of names (and optionally units) for the features. - If not passed, the column names in `features` are used. - If those are not set, they are set to `feature0`, `feature1`, etc. - You can also pass a list of tuples [(name, unit), ...] to set both names - and units in the resulting xarray.DataArray. + feature_names : list, optional + A list of feature names. If not passed, the column names in `features` are used. + return_pandas : bool, optional + If True, the function returns a pandas.DataFrame instead of an xarray.DataArray. + Note that this will not work if the features are not 1D and that occupancy and bin edges + will not be stored as attributes. Returns ------- xarray.DataArray - An xarray.DataArray containing the tuning curves with labeled dimensions. + An xarray.DataArray containing the tuning curves with bin centres as coordinates. + The bin edges and occupancy are stored as attributes. 
+ + Examples + -------- """ # check group @@ -151,26 +161,14 @@ def compute_tuning_curves( # check feature names if feature_names is None: - _feature_names = features.columns - _feature_units = [None] * len(_feature_names) + feature_names = features.columns else: - if not isinstance(feature_names, list): - raise TypeError("feature_names should be a sequence of strings or tuples.") + if not isinstance(feature_names, list) or not all( + isinstance(n, str) for n in feature_names + ): + raise TypeError("feature_names should be a list of strings.") if len(feature_names) != features.shape[1]: raise ValueError("feature_names should match the number of features.") - _feature_names = [] - _feature_units = [] - for feature in feature_names: - if isinstance(feature, str): - _feature_names.append(feature) - _feature_units.append(None) - elif isinstance(feature, tuple) and len(feature) == 2: - _feature_names.append(feature[0]) - _feature_units.append(feature[1]) - else: - raise TypeError( - "feature_names should be a sequence of strings or tuples of strings." 
- ) # check epochs if epochs is None: @@ -226,23 +224,24 @@ def compute_tuning_curves( tcs[np.isnan(tcs)] = 0.0 tcs[:, occupancy == 0.0] = np.nan - return xr.DataArray( - tcs, - coords={ - "unit": keys, - **{ - str(feature_name): ( - str(feature_name), - e[:-1] + np.diff(e) / 2, - {} if unit is None else {"units": unit}, - ) - for feature_name, unit, e in zip( - _feature_names, _feature_units, bin_edges - ) + if return_pandas and features.shape[1] == 1: + return pd.DataFrame( + tcs.T, + index=bin_edges[0][:-1] + np.diff(bin_edges[0]) / 2, + columns=keys, + ) + else: + return xr.DataArray( + tcs, + coords={ + "unit": keys, + **{ + str(feature_name): e[:-1] + np.diff(e) / 2 + for feature_name, e in zip(feature_names, bin_edges) + }, }, - }, - attrs={"occupancy": occupancy, "bin_edges": bin_edges}, - ) + attrs={"occupancy": occupancy, "bin_edges": bin_edges}, + ) @_validate_tuning_inputs diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index b7ee0a479..2fc77d373 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -5,6 +5,7 @@ import numpy as np import pandas as pd import pytest +import xarray as xr import pynapple as nap @@ -43,6 +44,22 @@ def get_tsdframe(): return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) +def get_group_n(n): + return nap.TsGroup( + {i + 1: nap.Ts(t=np.arange(0, 100, 10 ** (i - 1))) for i in range(n)} + ) + + +def get_features_n(n, fs=10.0): + return nap.TsdFrame( + t=np.arange(0, 100, 1 / fs), + d=np.stack( + [np.arange(0, 100, 1 / fs) % 10 * i for i in range(1, n + 1)], axis=1 + ), + columns=[f"feature{i}" for i in range(n)], + ) + + @pytest.mark.parametrize( "group, dict_ep, expected_exception", [ @@ -179,6 +196,214 @@ def test_compute_2d_mutual_info_errors( nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) +@pytest.mark.parametrize( + "group, features, kwargs, expectation", + [ + # group + ( + [1], + get_features_n(1), + {}, + pytest.raises( + TypeError, match="group 
should be a Tsd, TsdFrame, TsGroup, or dict." + ), + ), + ( + None, + get_features_n(1), + {}, + pytest.raises( + TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." + ), + ), + (get_group_n(1), get_features_n(1), {}, does_not_raise()), + (get_group_n(3), get_features_n(1), {}, does_not_raise()), + (get_group_n(1).count(0.1), get_features_n(1), {}, does_not_raise()), + (get_group_n(3).count(0.1), get_features_n(1), {}, does_not_raise()), + (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), get_features_n(1), {}, does_not_raise()), + ({1: nap.Ts([1, 2, 3])}, get_features_n(1), {}, does_not_raise()), + ( + {1: nap.Ts([1, 2, 3]), 2: nap.Ts([1, 2, 3])}, + get_features_n(1), + {}, + does_not_raise(), + ), + # features + ( + get_group_n(1), + [1], + {}, + pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), + ), + ( + get_group_n(1), + None, + {}, + pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), + ), + ( + get_group_n(1), + nap.Tsd(d=[1, 1, 1], t=[1, 2, 3]), + {}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(3), + {}, + does_not_raise(), + ), + # epochs + ( + get_group_n(1), + get_features_n(1), + {"epochs": 1}, + pytest.raises(TypeError, match="epochs should be an IntervalSet."), + ), + ( + get_group_n(1), + get_features_n(1), + {"epochs": [1, 2]}, + pytest.raises(TypeError, match="epochs should be an IntervalSet."), + ), + ( + get_group_n(1), + get_features_n(1), + {"epochs": None}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet(0.0, 50.0)}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 30.0], [10.0, 50.0])}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 1000.0])}, + does_not_raise(), + ), + # range + ( + get_group_n(1), + get_features_n(2), + {"range": (0, 1)}, + pytest.raises( + ValueError, + match="range should be a sequence of tuples, one 
for each feature.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"range": (0, 1)}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"range": [(0, 1)]}, + does_not_raise(), + ), + # fs + ( + get_group_n(1), + get_features_n(1), + {"fs": "1"}, + pytest.raises(TypeError, match="fs should be a number"), + ), + ( + get_group_n(1), + get_features_n(1), + {"fs": []}, + pytest.raises(TypeError, match="fs should be a number"), + ), + ( + get_group_n(1), + get_features_n(1), + {"fs": 1}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"fs": 1.0}, + does_not_raise(), + ), + # feature names + ( + get_group_n(1), + get_features_n(1), + {"feature_names": "feature0"}, + pytest.raises( + TypeError, + match="feature_names should be a list of strings.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": 0}, + pytest.raises( + TypeError, + match="feature_names should be a list of strings.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": ["feature0"]}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": ["feature0", "feature1"]}, + pytest.raises( + ValueError, match="feature_names should match the number of features." 
+ ), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": [1]}, + pytest.raises( + TypeError, + match="feature_names should be a list of strings.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": [(1,)]}, + pytest.raises( + TypeError, + match="feature_names should be a list of strings.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": [(1, 1)]}, + pytest.raises( + TypeError, + match="feature_names should be a list of strings.", + ), + ), + ], +) +def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): + with expectation: + nap.compute_tuning_curves(group, features, **kwargs) + + ######################## # Normal test ######################## @@ -577,3 +802,351 @@ def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected for i in tc.keys(): assert tc[i].shape == nb_bins np.testing.assert_almost_equal(tc[i], expected[i]) + + +@pytest.mark.parametrize( + "group, features, kwargs, expected", + [ + # single rate unit, single feature + ( + get_group_n(1).count(1.0), + get_features_n(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple rate units, single feature + ( + get_group_n(2).count(1.0), + get_features_n(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, + ), + ), + # multiple rate units, multiple features + ( + get_group_n(2).count(1.0), + get_features_n(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single 
unit, single feature + ( + get_group_n(1), + get_features_n(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple units, single feature + ( + get_group_n(2), + get_features_n(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, + ), + ), + # multiple units, multiple features + ( + get_group_n(2), + get_features_n(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature, specified number of bins + ( + get_group_n(1), + get_features_n(1), + {"bins": 5}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, + ), + ), + # single unit, multiple features, specified number of bins + ( + get_group_n(1), + get_features_n(2), + {"bins": 5}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, + }, + ), + ), + # single unit, multiple features, specified number of bins per feature + ( + get_group_n(1), + get_features_n(2), + {"bins": (5, 4)}, + xr.DataArray( + np.array( + [ + [ + [10.0, np.nan, np.nan, np.nan], + [10.0, 10.0, np.nan, np.nan], + [np.nan, 10.0, 10.0, np.nan], + [np.nan, np.nan, 10.0, 10.0], + [np.nan, np.nan, np.nan, 10.0], + ] + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] 
+ 0.99, + "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, + }, + ), + ), + # single unit, single feature, specified bins + ( + get_group_n(1), + get_features_n(1), + {"bins": [np.linspace(0, 10, 6)]}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, + ), + ), + # single unit, multiple features, specified bins + ( + get_group_n(1), + get_features_n(2), + {"bins": [np.linspace(0, 10, 6), np.linspace(0, 20, 6)]}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.arange(1, 11, 2), + "feature1": np.arange(2, 22, 4), + }, + ), + ), + # single unit, single feature, specified range + ( + get_group_n(1), + get_features_n(1), + {"range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature + ( + get_group_n(1), + get_features_n(2), + {"range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified range and number of bins + ( + get_group_n(1), + get_features_n(1), + {"bins": 10, "range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature and number of bins + ( + get_group_n(1), + get_features_n(2), + {"bins": 10, "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 
11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, multiple features, specified range and number of bins per feature + ( + get_group_n(1), + get_features_n(2), + {"bins": (10, 10), "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified epochs (smaller) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 50.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (larger) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 200.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (multiple) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 50.0], [20.0, 70.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified feature name + ( + get_group_n(1), + get_features_n(1), + {"feature_names": ["f0"]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, multiple features, specified feature names + ( + get_group_n(1), + get_features_n(2), + {"feature_names": ["f0", "f1"]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "f0", "f1"], + coords={ + "unit": [1], + "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, 
+ "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature, return_pandas=True + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": True}, + pd.DataFrame( + np.full((10, 1), 10.0), + index=np.linspace(0, 9.9, 11)[:-1] + 0.495, + columns=[1], + ), + ), + # single unit, multiple feature, return_pandas=True + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": True}, + pd.DataFrame( + np.full((10, 1), 10.0), + index=np.linspace(0, 9.9, 11)[:-1] + 0.495, + columns=[1], + ), + ), + # multiple units, multiple features, return_pandas=True + ( + get_group_n(2), + get_features_n(2), + {"return_pandas": True}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + ], +) +def test_compute_tuning_curves(group, features, kwargs, expected): + tcs = nap.compute_tuning_curves(group, features, **kwargs) + if isinstance(tcs, pd.DataFrame): + pd.testing.assert_frame_equal(tcs, expected) + else: + xr.testing.assert_allclose(tcs, expected) diff --git a/tests/test_tuning_curves_general.py b/tests/test_tuning_curves_general.py deleted file mode 100644 index 040a989ac..000000000 --- a/tests/test_tuning_curves_general.py +++ /dev/null @@ -1,587 +0,0 @@ -"""Tests of tuning curves for `pynapple` package.""" - -from contextlib import nullcontext as does_not_raise - -import numpy as np -import pytest -import xarray as xr - -import pynapple as nap - - -def get_group(n): - return nap.TsGroup( - {i + 1: nap.Ts(t=np.arange(0, 100, 10 ** (i - 1))) for i in range(n)} - ) - - -def get_features(n, fs=10.0): - return nap.TsdFrame( - t=np.arange(0, 100, 1 / fs), - d=np.stack( - [np.arange(0, 100, 1 / fs) % 10 * i for i in range(1, n + 1)], axis=1 - ), - columns=[f"feature{i}" for i in range(n)], - ) - - 
-@pytest.mark.parametrize( - "group, features, kwargs, expectation", - [ - # group - ( - [1], - get_features(1), - {}, - pytest.raises( - TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." - ), - ), - ( - None, - get_features(1), - {}, - pytest.raises( - TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." - ), - ), - (get_group(1), get_features(1), {}, does_not_raise()), - (get_group(3), get_features(1), {}, does_not_raise()), - (get_group(1).count(0.1), get_features(1), {}, does_not_raise()), - (get_group(3).count(0.1), get_features(1), {}, does_not_raise()), - (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), get_features(1), {}, does_not_raise()), - ({1: nap.Ts([1, 2, 3])}, get_features(1), {}, does_not_raise()), - ( - {1: nap.Ts([1, 2, 3]), 2: nap.Ts([1, 2, 3])}, - get_features(1), - {}, - does_not_raise(), - ), - # features - ( - get_group(1), - [1], - {}, - pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), - ), - ( - get_group(1), - None, - {}, - pytest.raises(TypeError, match="features should be a Tsd or TsdFrame"), - ), - ( - get_group(1), - nap.Tsd(d=[1, 1, 1], t=[1, 2, 3]), - {}, - does_not_raise(), - ), - ( - get_group(1), - get_features(3), - {}, - does_not_raise(), - ), - # epochs - ( - get_group(1), - get_features(1), - {"epochs": 1}, - pytest.raises(TypeError, match="epochs should be an IntervalSet."), - ), - ( - get_group(1), - get_features(1), - {"epochs": [1, 2]}, - pytest.raises(TypeError, match="epochs should be an IntervalSet."), - ), - ( - get_group(1), - get_features(1), - {"epochs": None}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet(0.0, 50.0)}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet([0.0, 30.0], [10.0, 50.0])}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet([0.0, 1000.0])}, - does_not_raise(), - ), - # range - ( - get_group(1), - get_features(2), - 
{"range": (0, 1)}, - pytest.raises( - ValueError, - match="range should be a sequence of tuples, one for each feature.", - ), - ), - ( - get_group(1), - get_features(1), - {"range": (0, 1)}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"range": [(0, 1)]}, - does_not_raise(), - ), - # fs - ( - get_group(1), - get_features(1), - {"fs": "1"}, - pytest.raises(TypeError, match="fs should be a number"), - ), - ( - get_group(1), - get_features(1), - {"fs": []}, - pytest.raises(TypeError, match="fs should be a number"), - ), - ( - get_group(1), - get_features(1), - {"fs": 1}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"fs": 1.0}, - does_not_raise(), - ), - # feature names - ( - get_group(1), - get_features(1), - {"feature_names": "feature0"}, - pytest.raises( - TypeError, - match="feature_names should be a sequence of strings or tuples.", - ), - ), - ( - get_group(1), - get_features(1), - {"feature_names": 0}, - pytest.raises( - TypeError, - match="feature_names should be a sequence of strings or tuples.", - ), - ), - ( - get_group(1), - get_features(1), - {"feature_names": ["feature0"]}, - does_not_raise(), - ), - ( - get_group(1), - get_features(1), - {"feature_names": ["feature0", "feature1"]}, - pytest.raises( - ValueError, match="feature_names should match the number of features." 
- ), - ), - ( - get_group(1), - get_features(1), - {"feature_names": [1]}, - pytest.raises( - TypeError, - match="feature_names should be a sequence of strings or tuples of strings.", - ), - ), - ( - get_group(1), - get_features(1), - {"feature_names": [(1,)]}, - pytest.raises( - TypeError, - match="feature_names should be a sequence of strings or tuples of strings.", - ), - ), - ( - get_group(1), - get_features(1), - {"feature_names": [("feature0", "x")]}, - does_not_raise(), - ), - ( - get_group(1), - get_features(2), - {"feature_names": [("feature0", "x"), "feature1"]}, - does_not_raise(), - ), - ( - get_group(1), - get_features(2), - {"feature_names": [("feature0", "unit0"), ("feature1", "unit1")]}, - does_not_raise(), - ), - ], -) -def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): - with expectation: - nap.compute_tuning_curves(group, features, **kwargs) - - -@pytest.mark.parametrize( - "group, features, kwargs, expected", - [ - # single rate unit, single feature - ( - get_group(1).count(1.0), - get_features(1), - {}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # multiple rate units, single feature - ( - get_group(2).count(1.0), - get_features(1), - {}, - xr.DataArray( - np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), - dims=["unit", "feature0"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - }, - ), - ), - # multiple rate units, multiple features - ( - get_group(2).count(1.0), - get_features(2), - {}, - xr.DataArray( - np.stack( - [ - np.where(np.eye(10), 10.0, np.nan), - np.where(np.eye(10), 1.0, np.nan), - ] - ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, - ), - ), - # single unit, single feature - ( - get_group(1), - 
get_features(1), - {}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # multiple units, single feature - ( - get_group(2), - get_features(1), - {}, - xr.DataArray( - np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), - dims=["unit", "feature0"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - }, - ), - ), - # multiple units, multiple features - ( - get_group(2), - get_features(2), - {}, - xr.DataArray( - np.stack( - [ - np.where(np.eye(10), 10.0, np.nan), - np.where(np.eye(10), 1.0, np.nan), - ] - ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, - ), - ), - # single unit, single feature, specified number of bins - ( - get_group(1), - get_features(1), - {"bins": 5}, - xr.DataArray( - np.full((1, 5), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, - ), - ), - # single unit, multiple features, specified number of bins - ( - get_group(1), - get_features(2), - {"bins": 5}, - xr.DataArray( - np.where(np.eye(5), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, - "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, - }, - ), - ), - # single unit, multiple features, specified number of bins per feature - ( - get_group(1), - get_features(2), - {"bins": (5, 4)}, - xr.DataArray( - np.array( - [ - [ - [10.0, np.nan, np.nan, np.nan], - [10.0, 10.0, np.nan, np.nan], - [np.nan, 10.0, 10.0, np.nan], - [np.nan, np.nan, 10.0, 10.0], - [np.nan, np.nan, np.nan, 10.0], - ] - ] - ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, - "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, - }, - 
), - ), - # single unit, single feature, specified bins - ( - get_group(1), - get_features(1), - {"bins": [np.linspace(0, 10, 6)]}, - xr.DataArray( - np.full((1, 5), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, - ), - ), - # single unit, multiple features, specified bins - ( - get_group(1), - get_features(2), - {"bins": [np.linspace(0, 10, 6), np.linspace(0, 20, 6)]}, - xr.DataArray( - np.where(np.eye(5), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.arange(1, 11, 2), - "feature1": np.arange(2, 22, 4), - }, - ), - ), - # single unit, single feature, specified range - ( - get_group(1), - get_features(1), - {"range": [(0, 5)]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, - ), - ), - # single unit, multiple features, specified range per feature - ( - get_group(1), - get_features(2), - {"range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), - ), - # single unit, single feature, specified range and number of bins - ( - get_group(1), - get_features(1), - {"bins": 10, "range": [(0, 5)]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, - ), - ), - # single unit, multiple features, specified range per feature and number of bins - ( - get_group(1), - get_features(2), - {"bins": 10, "range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), - ), - # single 
unit, multiple features, specified range and number of bins per feature - ( - get_group(1), - get_features(2), - {"bins": (10, 10), "range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), - ), - # single unit, single feature, specified epochs (smaller) - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet([0.0, 50.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # single unit, single feature, specified epochs (larger) - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet([0.0, 200.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # single unit, single feature, specified epochs (multiple) - ( - get_group(1), - get_features(1), - {"epochs": nap.IntervalSet([0.0, 50.0], [20.0, 70.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # single unit, single feature, specified feature name - ( - get_group(1), - get_features(1), - {"feature_names": ["f0"]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "f0"], - coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # single unit, single feature, specified feature name and unit - ( - get_group(1), - get_features(1), - {"feature_names": [("f0", "unit0")]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "f0"], - coords={ - "unit": [1], - "f0": ( - "f0", - np.linspace(0, 9.9, 11)[:-1] + 0.495, - {"units": "unit0"}, - ), - }, - ), - ), - # single unit, multiple features, specified feature names - ( - get_group(1), 
- get_features(2), - {"feature_names": ["f0", "f1"]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "f0", "f1"], - coords={ - "unit": [1], - "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, - ), - ), - # single unit, multiple features, specified feature names and units - ( - get_group(1), - get_features(2), - {"feature_names": [("f0", "unit0"), ("f1", "unit1")]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "f0", "f1"], - coords={ - "unit": [1], - "f0": ( - "f0", - np.linspace(0, 9.9, 11)[:-1] + 0.495, - {"units": "unit0"}, - ), - "f1": ( - "f1", - np.linspace(0, 19.8, 11)[:-1] + 0.99, - {"units": "unit1"}, - ), - }, - ), - ), - ], -) -def test_compute_tuning_curves(group, features, kwargs, expected): - xr.testing.assert_allclose( - nap.compute_tuning_curves(group, features, **kwargs), expected - ) From 6164817b4ff1910e168def8e7f93e06bad561f69 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 19:58:08 +0000 Subject: [PATCH 040/244] docstring --- pynapple/process/tuning_curves.py | 114 +++++++++++++++++++++++++++--- 1 file changed, 103 insertions(+), 11 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index c78b2a3ee..c3f7e6593 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -103,6 +103,7 @@ def compute_tuning_curves( The features (i.e. one column per feature). bins : sequence or int The bin specification: + * A sequence of arrays describing the monotonically increasing bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) @@ -130,11 +131,101 @@ def compute_tuning_curves( Returns ------- xarray.DataArray - An xarray.DataArray containing the tuning curves with bin centres as coordinates. + A tensor containing the tuning curves with labeled bin centres. The bin edges and occupancy are stored as attributes. 
Examples -------- + In the simplest case, we can pass a group of spikes per neuron and a single feature: + + >>> import pynapple as nap + >>> import numpy as np; np.random.seed(42) + >>> group = { + ... 1: nap.Ts(np.arange(0, 100, 0.1)), + ... 2: nap.Ts(np.arange(0, 100, 0.2)) + ... } + >>> feature = nap.Tsd(d=np.arange(0, 100, 0.1) % 1, t=np.arange(0, 100, 0.1)) + >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) + >>> tcs + Size: 160B + array([[10., 10., 10., 10., 10., 10., 10., 10., 10., 10.], + [10., 0., 10., 0., 10., 0., 10., 0., 10., 0.]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 + Attributes: + occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] + bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... + + The function can also take multiple features, in which case it computes n-dimensional tuning curves. + We can specify the number of bins for each feature: + + >>> features = nap.TsdFrame( + ... d=np.stack([np.arange(0, 100, 0.1) % 1, np.arange(0, 100, 0.1) % 2], axis=1), + ... t=np.arange(0, 100, 0.1) + ... ) + >>> tcs = nap.compute_tuning_curves(group, features, bins=[5, 3]) + >>> tcs + Size: 240B + array([[[10., 10., nan], + [10., 10., 10.], + [10., nan, 10.], + [10., 10., 10.], + [nan, 10., 10.]], + ... + [[ 5., 5., nan], + [ 5., 10., 0.], + [ 5., nan, 5.], + [10., 0., 5.], + [nan, 5., 5.]]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 40B 0.09 0.27 0.45 0.63 0.81 + * 1 (1) float64 24B 0.3167 0.95 1.583 + Attributes: + occupancy: [[100. 100. nan]\\n [100. 50. 50.]\\n [100. nan 100.]\\n [ 5... + bin_edges: [array([0. , 0.18, 0.36, 0.54, 0.72, 0.9 ]), array([0. ... + + Or even specify the bin edges directly: + + >>> tcs = nap.compute_tuning_curves(group, features, bins=[np.linspace(0, 1, 5), np.linspace(0, 2, 3)]) + >>> tcs + Size: 128B + array([[[10. , 10. ], + [10. , 10. ], + [10. , 10. ], + [10. , 10. ]], + ... 
+ [[ 6.66666667, 6.66666667], + [ 5. , 5. ], + [ 3.33333333, 3.33333333], + [ 5. , 5. ]]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 32B 0.125 0.375 0.625 0.875 + * 1 (1) float64 16B 0.5 1.5 + Attributes: + occupancy: [[150. 150.]\\n [100. 100.]\\n [150. 150.]\\n [100. 100.]] + bin_edges: [array([0. , 0.25, 0.5 , 0.75, 1. ]), array([0., 1., 2.])] + + In all of these cases, it is also possible to pass continuous values instead of spikes (e.g. calcium imaging data): + + >>> group = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) + >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) + >>> tcs + Size: 240B + array([[0.49147343, 0.50190395, 0.50971339, 0.50128013, 0.54332711, + 0.49712328, 0.49594611, 0.5110517 , 0.52247351, 0.52057658], + [0.51132036, 0.46410557, 0.47732505, 0.49830908, 0.53523019, + 0.53099429, 0.48668499, 0.44198555, 0.49222208, 0.47453398], + [0.46591801, 0.50662914, 0.46875882, 0.48734997, 0.51836574, + 0.50722266, 0.48943577, 0.49730095, 0.47944075, 0.48623693]]) + Coordinates: + * unit (unit) int64 24B 0 1 2 + * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 + Attributes: + occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] + bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... """ # check group @@ -341,19 +432,20 @@ def compute_discrete_tuning_curves(group, dict_ep): The function returns a pandas DataFrame with each row being a key of the dictionary of epochs and each column being a neurons. - This function can typically being used for a set of stimulus being presented for multiple epochs. - An example of the dictionary is : + This function can typically being used for a set of stimulus being presented for multiple epochs. 
+ An example of the dictionary is: + + >>> dict_ep = { + "stim0": nap.IntervalSet(start=0, end=1), + "stim1":nap.IntervalSet(start=2, end=3) + } - >>> dict_ep = { - "stim0": nap.IntervalSet(start=0, end=1), - "stim1":nap.IntervalSet(start=2, end=3) - } In this case, the function will return a pandas DataFrame : - >>> tc - neuron0 neuron1 neuron2 - stim0 0 Hz 1 Hz 2 Hz - stim1 3 Hz 4 Hz 5 Hz + >>> tc + neuron0 neuron1 neuron2 + stim0 0 Hz 1 Hz 2 Hz + stim1 3 Hz 4 Hz 5 Hz Parameters From 5b0ad0025000541c4d5cf6ee99d3c1712639ffcd Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 20:31:43 +0000 Subject: [PATCH 041/244] tuning curve notebook --- doc/user_guide/06_tuning_curves.md | 82 ++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 7903f906a..224be8c42 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -14,7 +14,7 @@ kernelspec: # Tuning curves Pynapple can compute n-dimensional tuning curves -(for example, firing rate as a function of 1D angular direction or firing rate as a function of 2D position). +(for example, firing rate as a function of 1D angular direction or firing rate as a function of 2D position). It can also compute average firing rate for different epochs (for example firing rate for different epochs of stimulus presentation). 
```{code-cell} ipython3 @@ -43,7 +43,6 @@ tsgroup = nap.TsGroup(group) ## From epochs - The epochs should be stored in a dictionnary: @@ -63,10 +62,8 @@ mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) pprint(mean_fr) ``` -## From timestamps activity +## From timestamps -### 1-dimensional tuning curves - ```{code-cell} ipython3 :tags: [hide-cell] from scipy.ndimage import gaussian_filter1d @@ -96,17 +93,32 @@ tsgroup = nap.TsGroup( ) ``` -Mandatory arguments are `TsGroup`, `Tsd` (or `TsdFrame` with 1 column only) -and `bins` for number of bins of the tuning curves. +Mandatory arguments are: +* a `TsGroup`, `Tsd`, or `TsdFrame` containing the neural activity of one or more units. +* a `Tsd` or `TsdFrame` containing the 1 or more features. -If an `IntervalSet` is passed with `epochs`, everything is restricted to `epochs`, -otherwise the time support of the feature is used. +By default, 10 bins are used for all features, but you can specify the number of bins, +or the bin edges explicitly, using the `bins` argument. -The min and max of the tuning curve is by default the min and max of the feature. +The min and max of the tuning curves are by default the minima and maxima of the features. This can be tweaked with the `range` argument. -The output is an `xarray.DataArray` with a unit and feature dimension. -The `feature_names` argument allows for setting feature names and units. +If an `IntervalSet` is passed with `epochs`, everything is restricted to `epochs`, +otherwise the time support of the features is used. + +If you do not want the sampling rate of the features to be estimated from the timestamps, +you can pass it explicitly using the `fs` argument. + +You can further also pass a list of strings to label each dimension via `feature_names` +(by default the columns of the features are used). + +The output is an `xarray.DataArray` with 1 dimension representing the units and further dimensions per feature. 
+The occupancy and bin edges are stored as attributes. + +If you explicitly want a `pd.DataFrame` as output (which is only possible when you have only 1 feature), +you can set `return_pandas=True`. Note that this will not return the occupancy and bin edges. + +### 1D tuning curves from spikes ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( @@ -119,13 +131,30 @@ tuning_curves_1d = nap.compute_tuning_curves( tuning_curves_1d ``` +The `xarray.DataArray` can be treated like a `numpy` array. + +It has a shape: +```{code-cell} ipython3 +tuning_curves_1d.shape +``` +It can be sliced: +```{code-cell} ipython3 +tuning_curves_1d[1, 2:8] +``` +It can also be indexed using the coordinates: +```{code-cell} ipython3 +tuning_curves_1d.sel(unit=1) +``` + +`xarray` further has `matplotlib` support, allowing for easy visualization: + ```{code-cell} ipython3 tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.ylabel("Firing rate (Hz)") plt.show() ``` -Internally, the function is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. +Internally, the `compute_tuning_curves` is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. ```{code-cell} ipython3 @@ -144,7 +173,7 @@ plt.legend() plt.show() ``` -### 2-dimensional tuning curves +### 2D tuning curves from spikes ```{code-cell} ipython3 :tags: [hide-cell] @@ -166,11 +195,7 @@ tsgroup = nap.TsGroup({ }, time_support=epoch) ``` -The `group` argument must be a `TsGroup` object. -The `features` argument must be a 2-columns `TsdFrame` object. -`bins` can be an int or a tuple of 2 ints. -`range` can be a list of two `(min, max)` tuples. 
- +If you pass more than 1 feature, a multi-dimensional tuning curve is computed: ```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( group=tsgroup, @@ -182,10 +207,12 @@ tuning_curves_2d = nap.compute_tuning_curves( tuning_curves_2d ``` -`tuning_curve_2d` is an `xarray.DataArray` with three dimensions: one for the units of `TsGroup` and 2 for the features, the coordinates contain the centers of the bins. +`tuning_curve_2d` is a again an `xarray.DataArray` but now with three dimensions: +one for the units of `TsGroup` and 2 for the features, the coordinates contain the centers of the bins. Bins that have never been visited by the feature have been assigned a NaN value. -Checking the accuracy of the tuning curves can be bone by displaying the spikes aligned to the features with the function `value_from` which assign to each spikes the corresponding features value for unit 0. +Checking the accuracy of the tuning curves can once more be done by displaying the spikes aligned +to the features with the function `value_from` which assign to each spikes the corresponding features value for unit 0. ```{code-cell} ipython3 ts_to_features = tsgroup[0].value_from(features) @@ -224,11 +251,7 @@ plt.tight_layout() plt.show() ``` -## From continuous activity - -Tuning curves computed in the following matter are usually made with data from calcium imaging activities. - -### 1-dimensional tuning curves +### 1D tuning curves from continuous activity ```{code-cell} ipython3 :tags: [hide-cell] @@ -259,7 +282,8 @@ tsdframe = nap.TsdFrame( ) ``` -The same function `nap.compute_tuning_curves` can also take a `TsdFrame` (for example continuous calcium data) as input. +We do not always have spikes. Sometimes we are analysing continuous firing rates or calcium intensities. 
+In that case, we can simply pass a `Tsd` or `TsdFrame` as group: ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( @@ -278,7 +302,9 @@ plt.ylabel("Firing rate (Hz)") plt.show() ``` -### 2-dimensional tuning curves +### 2D tuning curves from continuous activity + +This also works with more than one feature: ```{code-cell} ipython3 :tags: [hide-cell] From 5c8ce1417efb5edb93082ac2a0216d63cd67eba6 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 20:34:05 +0000 Subject: [PATCH 042/244] remove trailing spaces in md --- doc/examples/tutorial_HD_dataset.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index c415b2f23..c1b59f830 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -70,7 +70,7 @@ print(data) Head-Direction Tuning Curves ------------------ -To plot head-direction tuning curves, we need the spike timings and the orientation of the animal. +To plot head-direction tuning curves, we need the spike timings and the orientation of the animal. These quantities are stored in the variables 'units' and 'ry'. ```{code-cell} ipython3 @@ -87,17 +87,17 @@ print(spikes) Here, rate is the mean firing rate of the unit. Location indicates the brain region the unit was recorded from, and group refers to the shank number on which the cell was located. -This dataset contains units recorded from the anterior thalamus. Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). -Units were also recorded from nearby thalamic nuclei in this animal. -For the purposes of our tutorial, we are interested in the units recorded in ADn. -We can restrict ourselves to analysis of these units rather easily, using Pynapple. +This dataset contains units recorded from the anterior thalamus. 
Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). +Units were also recorded from nearby thalamic nuclei in this animal. +For the purposes of our tutorial, we are interested in the units recorded in ADn. +We can restrict ourselves to analysis of these units rather easily, using Pynapple. ```{code-cell} ipython3 spikes_adn = spikes.getby_category("location")["adn"] # Select only those units that are in ADn print(spikes_adn) ``` -Let's compute some head-direction tuning curves. +Let's compute some head-direction tuning curves. To do this in Pynapple, all you need is a single line of code! Let's plot firing rate of ADn units as a function of heading direction, i.e. a head-direction tuning curve: @@ -114,14 +114,14 @@ tuning_curves = nap.compute_tuning_curves( tuning_curves ``` -The output is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. +The output is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. Let's compute the preferred angle quickly as follows: ```{code-cell} ipython3 pref_ang = tuning_curves.idxmax(dim="head_direction") ``` -For easier visualization, we will color our plots according to the preferred angle of the cell. +For easier visualization, we will color our plots according to the preferred angle of the cell. To do so, we will normalize the range of angles we have, over a colormap. ```{code-cell} ipython3 @@ -179,7 +179,7 @@ Decoding Now that we have HD tuning curves, we can go one step further. Using only the population activity of ADn units, we can decode the direction the animal is looking in. We will then compare this to the real head-direction of the animal, and discover that population activity in the ADn indeed codes for HD. -To decode the population activity, we will be using a bayesian decoder as implemented in Pynapple. 
+To decode the population activity, we will be using a bayesian decoder as implemented in Pynapple. Again, just a single line of code! ```{code-cell} ipython3 @@ -276,7 +276,7 @@ plt.show() ``` -From this probability distribution, we observe that the decoded HD closely matches the actual HD. +From this probability distribution, we observe that the decoded HD closely matches the actual HD. Hence, the population activity in ADn is a reliable estimate of the heading direction of the animal. I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team! From 201a89eb11a39dcd0668f85ad030388cdeafe979 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 20:36:56 +0000 Subject: [PATCH 043/244] more trailing spaces --- doc/examples/tutorial_calcium_imaging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 20b85be76..e274d0d6f 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -16,7 +16,7 @@ Calcium Imaging Working with calcium data. -As example dataset, we will be working with a recording of a freely-moving mouse imaged with a Miniscope (1-photon imaging). +As example dataset, we will be working with a recording of a freely-moving mouse imaged with a Miniscope (1-photon imaging). The area recorded for this experiment is the postsubiculum - a region that is known to contain head-direction cells, or cells that fire when the animal's head is pointing in a specific direction. The NWB file for the example is hosted on [OSF](https://osf.io/sbnaw). We show below how to stream it. 
From 70a941e6195833571d87e7a92b29ac7868cafe0e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 20:37:57 +0000 Subject: [PATCH 044/244] typo --- doc/examples/tutorial_phase_preferences.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index d6ab7deb0..479171c7f 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -39,7 +39,7 @@ sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_par *** Downloading the data ------------------ -Let's download the data and save it locally +Let's download the data and save it locally. ```{code-cell} ipython3 path = "Achilles_10252013_EEG.nwb" From 4e27d49aae951b6f1e4c5e1b6388b88afa6ee044 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 15 Jul 2025 20:38:34 +0000 Subject: [PATCH 045/244] white spaces --- doc/examples/tutorial_pynapple_dandi.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_pynapple_dandi.md b/doc/examples/tutorial_pynapple_dandi.md index 1dbace0c0..a0061a6ca 100644 --- a/doc/examples/tutorial_pynapple_dandi.md +++ b/doc/examples/tutorial_pynapple_dandi.md @@ -109,7 +109,7 @@ tuning_curves.plot(row="unit", col_wrap=4, figsize=(15, 7)) plt.show() ``` -Let's plot the spikes of unit 1, which has a nice grid. +Let's plot the spikes of unit 1, which has a nice grid. Here, I use the [`value_from`](pynapple.Ts.value_from) function to assign to each spike the closest position in time. 
```{code-cell} ipython3 From 75151e7bf37d04934520a1c77d684895a820df3d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 16 Jul 2025 16:08:59 +0000 Subject: [PATCH 046/244] tuning curves user guide update --- doc/user_guide/06_tuning_curves.md | 107 +++++++++++++---------------- pynapple/process/tuning_curves.py | 4 ++ tests/test_tuning_curves.py | 25 +++++++ 3 files changed, 77 insertions(+), 59 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 224be8c42..7b672fd99 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -13,9 +13,10 @@ kernelspec: # Tuning curves -Pynapple can compute n-dimensional tuning curves +With Pynapple you can easily compute n-dimensional tuning curves (for example, firing rate as a function of 1D angular direction or firing rate as a function of 2D position). -It can also compute average firing rate for different epochs (for example firing rate for different epochs of stimulus presentation). +It is also possible to compute average firing rate for different epochs +(for example firing rate for different epochs of stimulus presentation). ```{code-cell} ipython3 :tags: [hide-cell] @@ -43,7 +44,7 @@ tsgroup = nap.TsGroup(group) ## From epochs -The epochs should be stored in a dictionnary: +When computing from epochs, you should store them in a dictionary: ```{code-cell} ipython3 @@ -54,12 +55,12 @@ dict_ep = { ``` [`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. -The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet` type. -The value is the mean firing rate of the neuron during this set of intervals. +The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. 
+The output will be the mean firing rate of the neuron during this set of intervals. ```{code-cell} ipython3 mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) -pprint(mean_fr) +print(mean_fr) ``` ## From timestamps @@ -93,9 +94,9 @@ tsgroup = nap.TsGroup( ) ``` -Mandatory arguments are: +When computing from general time-series, mandatory arguments are: * a `TsGroup`, `Tsd`, or `TsdFrame` containing the neural activity of one or more units. -* a `Tsd` or `TsdFrame` containing the 1 or more features. +* a `Tsd` or `TsdFrame` containing one or more features. By default, 10 bins are used for all features, but you can specify the number of bins, or the bin edges explicitly, using the `bins` argument. @@ -112,10 +113,10 @@ you can pass it explicitly using the `fs` argument. You can further also pass a list of strings to label each dimension via `feature_names` (by default the columns of the features are used). -The output is an `xarray.DataArray` with 1 dimension representing the units and further dimensions per feature. +The output is an `xarray.DataArray` in which the first dimension represents the units and further dimensions represent the features. The occupancy and bin edges are stored as attributes. -If you explicitly want a `pd.DataFrame` as output (which is only possible when you have only 1 feature), +If you explicitly want a `pd.DataFrame` as output (which is only possible when you have just the one feature), you can set `return_pandas=True`. Note that this will not return the occupancy and bin edges. ### 1D tuning curves from spikes @@ -154,7 +155,16 @@ plt.ylabel("Firing rate (Hz)") plt.show() ``` -Internally, the `compute_tuning_curves` is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. 
+You can either customize the plot labels yourself using `matplotlib`, or you can set them in the tuning curve object: +```{code-cell} ipython3 +tuning_curves_1d.name = "Firing rate" +tuning_curves_1d.attrs["unit"] = "Hz" +tuning_curves_1d.coords["feature"].attrs["unit"] = "rad" +tuning_curves_1d.plot.line(x="feature", add_legend=False) +plt.show() +``` + +Internally, the `compute_tuning_curves` is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. ```{code-cell} ipython3 @@ -202,16 +212,25 @@ tuning_curves_2d = nap.compute_tuning_curves( features=features, bins=(5,5), range=[(-1, 1), (-1, 1)], - feature_names=["x", "y"] + feature_names=["a", "b"] ) tuning_curves_2d ``` `tuning_curve_2d` is a again an `xarray.DataArray` but now with three dimensions: -one for the units of `TsGroup` and 2 for the features, the coordinates contain the centers of the bins. +one for the units of `TsGroup` and 2 for the features, the coordinates contain the centers of the bins. Bins that have never been visited by the feature have been assigned a NaN value. -Checking the accuracy of the tuning curves can once more be done by displaying the spikes aligned +Two-dimensional tuning curves can also easily be visualized: + +```{code-cell} ipython3 +tuning_curves_2d.name="Firing rate" +tuning_curves_2d.attrs["unit"]="Hz" +tuning_curves_2d.plot(col="unit") +plt.show() +``` + +Verifying the accuracy of the tuning curves can once more be done by displaying the spikes aligned to the features with the function `value_from` which assign to each spikes the corresponding features value for unit 0. 
```{code-cell} ipython3 @@ -223,30 +242,21 @@ print(ts_to_features) ```{code-cell} ipython3 :tags: [hide-input] -plt.figure() -plt.subplot(121) -plt.plot(features["b"], features["a"], label="features") -plt.plot(ts_to_features["b"], ts_to_features["a"], "o", color="red", markersize=4, label="spikes") -plt.xlabel("feature b") -plt.ylabel("feature a") -[plt.axvline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] -[plt.axhline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] -plt.subplot(122) +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,4), sharey=True) +ax1.plot(features["b"], features["a"], label="features") +ax1.plot(ts_to_features["b"], ts_to_features["a"], "o", color="red", markersize=4, label="spikes") +ax1.set_xlabel("b") +ax1.set_ylabel("a") +[ax1.axvline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] +[ax1.axhline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] extents = ( np.min(features["a"]), np.max(features["a"]), np.min(features["b"]), np.max(features["b"]), ) -plt.imshow(tuning_curves_2d[0], - origin="lower", extent=extents, cmap="viridis", - aspect='auto' - ) -plt.title("Tuning curve unit 0") -plt.xlabel("feature b") -plt.ylabel("feature a") -plt.grid(False) -plt.colorbar() +tuning_curves_2d[0].plot(ax=ax2) +ax2.set_ylabel("") plt.tight_layout() plt.show() ``` @@ -297,8 +307,9 @@ tuning_curves_1d ``` ```{code-cell} ipython3 +tuning_curves_1d.name="ΔF/F" +tuning_curves_1d.attrs["unit"]="a.u." 
tuning_curves_1d.plot.line(x="feature", add_legend=False) -plt.ylabel("Firing rate (Hz)") plt.show() ``` @@ -333,36 +344,14 @@ tuning_curves_2d = nap.compute_tuning_curves( group=tsdframe, features=features, bins=5, - feature_names=["x", "y"] + feature_names=["a", "b"] ) tuning_curves_2d ``` ```{code-cell} ipython3 -:tags: [hide-input] -plt.figure() -plt.subplot(121) -plt.plot(features["b"], features["a"], label="features") -plt.xlabel("feature b") -plt.ylabel("feature a") -[plt.axvline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] -[plt.axhline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)] -plt.subplot(122) -extents = ( - np.min(features["a"]), - np.max(features["a"]), - np.min(features["b"]), - np.max(features["b"]), -) -plt.imshow(tuning_curves_2d[0], - origin="lower", extent=extents, cmap="viridis", - aspect='auto' - ) -plt.title("Tuning curve unit 0") -plt.xlabel("feature b") -plt.ylabel("feature a") -plt.grid(False) -plt.colorbar() -plt.tight_layout() +tuning_curves_2d.name="ΔF/F" +tuning_curves_2d.attrs["unit"]="a.u." +tuning_curves_2d.plot(col="unit") plt.show() ``` diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index c3f7e6593..9f38b6427 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -285,6 +285,10 @@ def compute_tuning_curves( "range should be a sequence of tuples, one for each feature." 
) + # check return_pandas + if not isinstance(return_pandas, bool): + raise TypeError("return_pandas should be a boolean.") + # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 2fc77d373..8ef611ac5 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -397,6 +397,31 @@ def test_compute_2d_mutual_info_errors( match="feature_names should be a list of strings.", ), ), + # return pandas + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": 1}, + pytest.raises( + TypeError, + match="return_pandas should be a boolean.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": "1"}, + pytest.raises( + TypeError, + match="return_pandas should be a boolean.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": True}, + does_not_raise(), + ), ], ) def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): From f8ef226a6f8bea0a0d2a50a6897b6c2de107762a Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 16 Jul 2025 16:18:40 +0000 Subject: [PATCH 047/244] cleaning up docstring --- pynapple/process/tuning_curves.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 9f38b6427..815051743 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -97,7 +97,7 @@ def compute_tuning_curves( Parameters ---------- - group : TsGroup, TsdFrame or dict of Ts, Tsd objects. + group : TsGroup, TsdFrame or dict of Ts, Tsd The group of Ts or Tsd for which the tuning curves will be computed features : Tsd, TsdFrame The features (i.e. one column per feature). @@ -161,7 +161,13 @@ def compute_tuning_curves( We can specify the number of bins for each feature: >>> features = nap.TsdFrame( - ... 
d=np.stack([np.arange(0, 100, 0.1) % 1, np.arange(0, 100, 0.1) % 2], axis=1), + ... d=np.stack( + ... [ + ... np.arange(0, 100, 0.1) % 1, + ... np.arange(0, 100, 0.1) % 2 + ... ], + ... axis=1 + ... ), ... t=np.arange(0, 100, 0.1) ... ) >>> tcs = nap.compute_tuning_curves(group, features, bins=[5, 3]) @@ -188,7 +194,11 @@ def compute_tuning_curves( Or even specify the bin edges directly: - >>> tcs = nap.compute_tuning_curves(group, features, bins=[np.linspace(0, 1, 5), np.linspace(0, 2, 3)]) + >>> tcs = nap.compute_tuning_curves( + ... group, + ... features, + ... bins=[np.linspace(0, 1, 5), np.linspace(0, 2, 3)] + ... ) >>> tcs Size: 128B array([[[10. , 10. ], From 7cd86934b28550c0856b46ef822f8166f901ca1e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 16 Jul 2025 18:32:55 +0000 Subject: [PATCH 048/244] fix tutorial --- doc/examples/tutorial_phase_preferences.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index 479171c7f..7ee21dc95 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -215,7 +215,7 @@ phase_modulation = nap.compute_tuning_curves( features=theta_phase, bins=61, range=(-np.pi, np.pi), - feature_names=[("Phase", "rad")] + feature_names=["Phase"] ) ``` @@ -224,6 +224,7 @@ Let's plot the first 3 neurons. 
```{code-cell} ipython3 phase_modulation.name="Firing Rate" phase_modulation.attrs["units"]="Hz" +phase_modulation.coords["Phase"].attrs["unit"]="rad" phase_modulation[:3].plot(row="unit", col_wrap=3, sharey=False) plt.show() ``` From 9d3aff576918d4dd0cff0fdbf2506f22b795c9d7 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 09:45:21 -0400 Subject: [PATCH 049/244] add script for matching param --- scripts/check_parameter_naming.py | 96 +++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 scripts/check_parameter_naming.py diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py new file mode 100644 index 000000000..1fc7d7c9b --- /dev/null +++ b/scripts/check_parameter_naming.py @@ -0,0 +1,96 @@ +import difflib +import inspect +import types + +def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): + """ + Recursively collect and groups similar parameter names from functions and methods. + + This function traverses the given package and its submodules, extracting parameter + names from all user-defined functions and methods. Parameter names that are + lexically similar (based on `difflib.get_close_matches`) are grouped together. + This can be used to detect inconsistent naming conventions across a codebase. + + Parameters + ---------- + package : module + The root package to analyze (e.g., `pynapple`). + root_name : str, optional + The dotted name of the root package. If not provided, it is inferred from + `package.__name__`. + similarity_cutoff : float, optional + Similarity threshold between 0 and 1 used to group parameters based on + lexical similarity (default is 0.8). + + Returns + ------- + dict + A dictionary mapping canonical parameter names to a list of tuples. 
+ Each tuple contains: + - The actual parameter name + - The fully qualified function or method path where it appears + + Example + ------- + { + "time": [("time", "pynapple.core.Tsd.__init__"), ("t", "pynapple.io.load")], + ... + } + """ + if root_name is None: + root_name = package.__name__ + + results = {} + visited_ids = set() + + def process_function(func, path): + try: + sig = inspect.signature(func) + param_names = list(sig.parameters) + for par in param_names: + if par in results: + continue # exact name already exists + match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) + if match: + results[match[0]].append((par, path)) + else: + results[par] = [(par, path)] + except Exception: + pass # some built-ins or extension modules may not support signature() + + def walk(obj, path_prefix=""): + if id(obj) in visited_ids: + return + visited_ids.add(id(obj)) + + if inspect.isfunction(obj) or inspect.ismethod(obj): + if getattr(obj, '__module__', '').startswith(root_name): + process_function(obj, path_prefix) + + elif inspect.isclass(obj): + if getattr(obj, '__module__', '').startswith(root_name): + for name, member in inspect.getmembers(obj): + if name.startswith("_"): + continue + walk(member, f"{path_prefix}.{name}") + + elif isinstance(obj, types.ModuleType): + if not getattr(obj, '__name__', '').startswith(root_name): + return # external module, skip + for name, member in inspect.getmembers(obj): + if name.startswith("_"): + continue + walk(member, f"{path_prefix}.{name}") + + walk(package, package.__name__) + return results + +if __name__ == "__main__": + import pynapple as nap + + params = collect_similar_parameter_names(nap, similarity_cutoff=0.9) + + for name, occurrences in params.copy().items(): + if len(occurrences) == 1: + params.pop(name) + From 97c67afebdeaf83e059f6694dbd33f1192ee0f4a Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 17 Jul 2025 16:24:48 +0000 Subject: [PATCH 050/244] merge --- 
pynapple/process/decoding.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 48a33b72b..83be005e0 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -279,9 +279,9 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): Parameters ---------- tuning_curves : xr.DataArray - Tuning curves as outputed by `compute_tuning_curves` (one for each neuron). + Tuning curves as outputed by `compute_tuning_curves` (one for each unit). group : TsGroup, TsdFrame or dict of Ts/Tsd object. - A group of neurons with the same keys as tuning_curves dictionary. + A group of neurons with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed rates (recommended). epochs : IntervalSet The epochs on which decoding is computed @@ -307,7 +307,7 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): """ # check tuning curves - if not isinstance(tuning_curves, (xr.DataArray)): + if not isinstance(tuning_curves, xr.DataArray): raise TypeError( "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves" ) From cdab5b11bf8dde5998c688f5ecd1d2f5f88e1212 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 12:58:05 -0400 Subject: [PATCH 051/244] add a tox env --- tox.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index e6ad4d7ad..15e27741d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,6 @@ [tox] isolated_build = True envlist = py310,py311 -requires = tox-conda [testenv] # means we'll run the equivalent of `pip install .[dev]`, also installing pytest @@ -18,6 +17,10 @@ commands = coverage run --source=pynapple --branch -m pytest tests/ coverage report -m +[testenv:params] +commands= + python scripts/check_parameter_naming.py + [gh-actions] python = 3.10: py310 From ec827efffcaec482302d73e6414532970b3a2435 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo 
Date: Thu, 17 Jul 2025 12:58:12 -0400 Subject: [PATCH 052/244] add to ci --- .github/workflows/main.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b88cff142..5c0db6fac 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.11" - name: Install dependencies run: | echo "testing: ${{github.ref}}" @@ -33,6 +33,16 @@ jobs: flake8 pynapple --max-complexity 10 black --check tests isort --check tests --profile black + check-param-naming: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - name: Check parameter name consistency + run: python scripts/check_parameter_naming.py test: needs: lint runs-on: ${{ matrix.os }} From 1325a3dce4e7733c1a3195bc1296c4a41a10f6e5 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 12:58:20 -0400 Subject: [PATCH 053/244] improve err message --- scripts/check_parameter_naming.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 1fc7d7c9b..7b0bee650 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -94,3 +94,12 @@ def walk(obj, path_prefix=""): if len(occurrences) == 1: params.pop(name) + matches = [] + for name, occurrences in params.items(): + matches.append(f"{name}:\n") + for occurrence in occurrences: + matches.append(f"\t- {occurrence[1]}: {occurrence[0]}\n") + matches.append("\n") + + if params: + raise ValueError("Inconsistency in parameter naming fonund!\n\n" + "".join(matches)) \ No newline at end of file From 2141deab087908a968619ef9359116c96c03a663 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 13:03:25 -0400 Subject: [PATCH 
054/244] fix ci --- .github/workflows/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5c0db6fac..9c939f335 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -41,6 +41,10 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.11" + - name: Install pynapple + run: | + python -m pip install --upgrade pip + pip install . - name: Check parameter name consistency run: python scripts/check_parameter_naming.py test: From 3d3fd29dce9c49de783b5a0b0bd584bd7be4d9aa Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 13:53:09 -0400 Subject: [PATCH 055/244] use a logger --- scripts/check_parameter_naming.py | 114 +++++------------------------- 1 file changed, 18 insertions(+), 96 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 7b0bee650..b0d36c714 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -1,105 +1,27 @@ -import difflib -import inspect -import types +import logging +import sys +import pynapple as nap # keep this import at the top if you prefer -def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): - """ - Recursively collect and groups similar parameter names from functions and methods. - - This function traverses the given package and its submodules, extracting parameter - names from all user-defined functions and methods. Parameter names that are - lexically similar (based on `difflib.get_close_matches`) are grouped together. - This can be used to detect inconsistent naming conventions across a codebase. - - Parameters - ---------- - package : module - The root package to analyze (e.g., `pynapple`). - root_name : str, optional - The dotted name of the root package. If not provided, it is inferred from - `package.__name__`. 
- similarity_cutoff : float, optional - Similarity threshold between 0 and 1 used to group parameters based on - lexical similarity (default is 0.8). - - Returns - ------- - dict - A dictionary mapping canonical parameter names to a list of tuples. - Each tuple contains: - - The actual parameter name - - The fully qualified function or method path where it appears - - Example - ------- - { - "time": [("time", "pynapple.core.Tsd.__init__"), ("t", "pynapple.io.load")], - ... - } - """ - if root_name is None: - root_name = package.__name__ - - results = {} - visited_ids = set() - - def process_function(func, path): - try: - sig = inspect.signature(func) - param_names = list(sig.parameters) - for par in param_names: - if par in results: - continue # exact name already exists - match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) - if match: - results[match[0]].append((par, path)) - else: - results[par] = [(par, path)] - except Exception: - pass # some built-ins or extension modules may not support signature() - - def walk(obj, path_prefix=""): - if id(obj) in visited_ids: - return - visited_ids.add(id(obj)) - - if inspect.isfunction(obj) or inspect.ismethod(obj): - if getattr(obj, '__module__', '').startswith(root_name): - process_function(obj, path_prefix) - - elif inspect.isclass(obj): - if getattr(obj, '__module__', '').startswith(root_name): - for name, member in inspect.getmembers(obj): - if name.startswith("_"): - continue - walk(member, f"{path_prefix}.{name}") - - elif isinstance(obj, types.ModuleType): - if not getattr(obj, '__name__', '').startswith(root_name): - return # external module, skip - for name, member in inspect.getmembers(obj): - if name.startswith("_"): - continue - walk(member, f"{path_prefix}.{name}") - - walk(package, package.__name__) - return results +logger = logging.getLogger("check_parameter_naming") +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") if __name__ == "__main__": - 
import pynapple as nap - params = collect_similar_parameter_names(nap, similarity_cutoff=0.9) - for name, occurrences in params.copy().items(): + # Remove non-conflicting parameter names + for name, occurrences in list(params.items()): if len(occurrences) == 1: params.pop(name) - matches = [] - for name, occurrences in params.items(): - matches.append(f"{name}:\n") - for occurrence in occurrences: - matches.append(f"\t- {occurrence[1]}: {occurrence[0]}\n") - matches.append("\n") - if params: - raise ValueError("Inconsistency in parameter naming fonund!\n\n" + "".join(matches)) \ No newline at end of file + msg_lines = ["Inconsistency in parameter naming found!\n"] + for name, occurrences in params.items(): + msg_lines.append(f"{name}:\n") + for param_name, path in occurrences: + msg_lines.append(f"\t- {path}: {param_name}\n") + msg_lines.append("\n") + + logger.error("".join(msg_lines)) + sys.exit(1) + else: + logger.info("No parameter naming inconsistencies found.") From ccc69c1967bb9b26668f9a4784420c3cbe08e97b Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 13:55:04 -0400 Subject: [PATCH 056/244] add logger --- scripts/check_parameter_naming.py | 96 +++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 4 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index b0d36c714..ef1d5c898 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -1,14 +1,102 @@ +import difflib +import inspect +import types import logging import sys -import pynapple as nap # keep this import at the top if you prefer +import pynapple as nap + + +def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): + """ + Recursively collect and group similar parameter names from functions and methods. + + This function traverses the given package and its submodules, extracting parameter + names from all user-defined functions and methods. 
Parameter names that are + lexically similar (based on `difflib.get_close_matches`) are grouped together. + This can be used to detect inconsistent naming conventions across a codebase. + + Parameters + ---------- + package : module + The root package to analyze (e.g., `pynapple`). + root_name : str, optional + The dotted name of the root package. If not provided, it is inferred from + `package.__name__`. + similarity_cutoff : float, optional + Similarity threshold between 0 and 1 used to group parameters based on + lexical similarity (default is 0.8). + + Returns + ------- + dict + A dictionary mapping canonical parameter names to a list of tuples. + Each tuple contains: + - The actual parameter name + - The fully qualified function or method path where it appears + + Example + ------- + { + "time": [("time", "pynapple.core.Tsd.__init__"), ("t", "pynapple.io.load")], + ... + } + """ + if root_name is None: + root_name = package.__name__ + + results = {} + visited_ids = set() + + def process_function(func, path): + try: + sig = inspect.signature(func) + param_names = list(sig.parameters) + for par in param_names: + if par in results: + continue # exact name already exists + match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) + if match: + results[match[0]].append((par, path)) + else: + results[par] = [(par, path)] + except Exception: + pass # some built-ins or extension modules may not support signature() + + def walk(obj, path_prefix=""): + if id(obj) in visited_ids: + return + visited_ids.add(id(obj)) + + if inspect.isfunction(obj) or inspect.ismethod(obj): + if getattr(obj, '__module__', '').startswith(root_name): + process_function(obj, path_prefix) + + elif inspect.isclass(obj): + if getattr(obj, '__module__', '').startswith(root_name): + for name, member in inspect.getmembers(obj): + if name.startswith("_"): + continue + walk(member, f"{path_prefix}.{name}") + + elif isinstance(obj, types.ModuleType): + if not getattr(obj, 
'__name__', '').startswith(root_name): + return # external module, skip + for name, member in inspect.getmembers(obj): + if name.startswith("_"): + continue + walk(member, f"{path_prefix}.{name}") + + walk(package, package.__name__) + return results -logger = logging.getLogger("check_parameter_naming") -logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") if __name__ == "__main__": + logger = logging.getLogger("check_parameter_naming") + logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") + params = collect_similar_parameter_names(nap, similarity_cutoff=0.9) - # Remove non-conflicting parameter names + # Filter out parameter names that occur only once for name, occurrences in list(params.items()): if len(occurrences) == 1: params.pop(name) From 5f0c17ffbc3e5c5bab8db9a129485d823669c845 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:08:01 -0400 Subject: [PATCH 057/244] fix logic --- scripts/check_parameter_naming.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index ef1d5c898..42b7e5139 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -53,7 +53,8 @@ def process_function(func, path): param_names = list(sig.parameters) for par in param_names: if par in results: - continue # exact name already exists + results[par].append((par, path)) + continue # exact name already exists store match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) if match: results[match[0]].append((par, path)) @@ -91,14 +92,23 @@ def walk(obj, path_prefix=""): if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Check for inconsistent parameter naming.") + parser.add_argument( + "--threshold", "-t", type=float, default=0.9, + help="Similarity threshold (between 0 and 1) for grouping parameter names (default: 
0.9)" + ) + args = parser.parse_args() + logger = logging.getLogger("check_parameter_naming") logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") - params = collect_similar_parameter_names(nap, similarity_cutoff=0.9) + params = collect_similar_parameter_names(nap, similarity_cutoff=args.threshold) # Filter out parameter names that occur only once for name, occurrences in list(params.items()): - if len(occurrences) == 1: + if all(o == name for o, _ in occurrences): params.pop(name) if params: From afab50794f4e0af5205d3772cbf0a21b59228c39 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:13:35 -0400 Subject: [PATCH 058/244] add a valid registry --- scripts/check_parameter_naming.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 42b7e5139..dd6164f44 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -5,6 +5,12 @@ import sys import pynapple as nap +VALID_PAIRS = [ + {"ep", "sep"}, + {"ts", "tsd"}, + {"args", "kwargs"}, + {"channel", "n_channels"}, +] def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): """ @@ -56,7 +62,7 @@ def process_function(func, path): results[par].append((par, path)) continue # exact name already exists store match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) - if match: + if match and not {match[0], par} in VALID_PAIRS: results[match[0]].append((par, path)) else: results[par] = [(par, path)] @@ -96,7 +102,7 @@ def walk(obj, path_prefix=""): parser = argparse.ArgumentParser(description="Check for inconsistent parameter naming.") parser.add_argument( - "--threshold", "-t", type=float, default=0.9, + "--threshold", "-t", type=float, default=0.8, help="Similarity threshold (between 0 and 1) for grouping parameter names (default: 0.9)" ) args = parser.parse_args() From 
7b6bb8b987361c65ae0c75428d4ea613876aedad Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:24:51 -0400 Subject: [PATCH 059/244] change to warning --- .github/workflows/main.yml | 1 + scripts/check_parameter_naming.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9c939f335..1fc976600 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -113,6 +113,7 @@ jobs: - lint - test - documentation + - check-param-naming runs-on: ubuntu-latest steps: - name: Decide whether all tests and notebooks succeeded diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index dd6164f44..ef9979da5 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -125,7 +125,7 @@ def walk(obj, path_prefix=""): msg_lines.append(f"\t- {path}: {param_name}\n") msg_lines.append("\n") - logger.error("".join(msg_lines)) + logger.warning("".join(msg_lines)) sys.exit(1) else: logger.info("No parameter naming inconsistencies found.") From ea5d5ff83160ef142108ef32987ec7091f998a1a Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:27:14 -0400 Subject: [PATCH 060/244] change to warning --- scripts/check_parameter_naming.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index ef9979da5..38876b23a 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -126,6 +126,6 @@ def walk(obj, path_prefix=""): msg_lines.append("\n") logger.warning("".join(msg_lines)) - sys.exit(1) + sys.exit(0) else: logger.info("No parameter naming inconsistencies found.") From d9d25f39626cc1affefca0de5b9b756ef9d42088 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:30:24 -0400 Subject: [PATCH 061/244] prepare it for when it is going to be enforced --- .github/workflows/main.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1fc976600..b93170249 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,7 +48,7 @@ jobs: - name: Check parameter name consistency run: python scripts/check_parameter_naming.py test: - needs: lint + needs: [lint, check-param-naming] runs-on: ${{ matrix.os }} strategy: matrix: From 7014066a9f1eb977e0a5eabbe4399399b2b4fa77 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:30:54 -0400 Subject: [PATCH 062/244] add comment --- scripts/check_parameter_naming.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 38876b23a..998f52067 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -126,6 +126,7 @@ def walk(obj, path_prefix=""): msg_lines.append("\n") logger.warning("".join(msg_lines)) + # TODO: change this sys.exit(1) to fail the CI sys.exit(0) else: logger.info("No parameter naming inconsistencies found.") From cc82a98578e4b463aecf61bfa2635c6b20d235bb Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Thu, 17 Jul 2025 14:31:23 -0400 Subject: [PATCH 063/244] linted --- scripts/check_parameter_naming.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 998f52067..164c15f5b 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -1,8 +1,9 @@ import difflib import inspect -import types import logging import sys +import types + import pynapple as nap VALID_PAIRS = [ @@ -12,6 +13,7 @@ {"channel", "n_channels"}, ] + def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): """ Recursively collect and group similar parameter names from functions and methods. 
@@ -61,7 +63,9 @@ def process_function(func, path): if par in results: results[par].append((par, path)) continue # exact name already exists store - match = difflib.get_close_matches(par, results.keys(), n=1, cutoff=similarity_cutoff) + match = difflib.get_close_matches( + par, results.keys(), n=1, cutoff=similarity_cutoff + ) if match and not {match[0], par} in VALID_PAIRS: results[match[0]].append((par, path)) else: @@ -75,18 +79,18 @@ def walk(obj, path_prefix=""): visited_ids.add(id(obj)) if inspect.isfunction(obj) or inspect.ismethod(obj): - if getattr(obj, '__module__', '').startswith(root_name): + if getattr(obj, "__module__", "").startswith(root_name): process_function(obj, path_prefix) elif inspect.isclass(obj): - if getattr(obj, '__module__', '').startswith(root_name): + if getattr(obj, "__module__", "").startswith(root_name): for name, member in inspect.getmembers(obj): if name.startswith("_"): continue walk(member, f"{path_prefix}.{name}") elif isinstance(obj, types.ModuleType): - if not getattr(obj, '__name__', '').startswith(root_name): + if not getattr(obj, "__name__", "").startswith(root_name): return # external module, skip for name, member in inspect.getmembers(obj): if name.startswith("_"): @@ -100,10 +104,15 @@ def walk(obj, path_prefix=""): if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description="Check for inconsistent parameter naming.") + parser = argparse.ArgumentParser( + description="Check for inconsistent parameter naming." 
+ ) parser.add_argument( - "--threshold", "-t", type=float, default=0.8, - help="Similarity threshold (between 0 and 1) for grouping parameter names (default: 0.9)" + "--threshold", + "-t", + type=float, + default=0.8, + help="Similarity threshold (between 0 and 1) for grouping parameter names (default: 0.9)", ) args = parser.parse_args() From 063e1d287c9fdcf07a55a50d8816429f709609e8 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 17:26:35 +0000 Subject: [PATCH 064/244] testing --- pynapple/process/decoding.py | 369 +++++++++------------------------- tests/test_decoding.py | 374 ++++++++++++++++++++++++++++------- 2 files changed, 397 insertions(+), 346 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 83be005e0..ac2c4110f 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -2,270 +2,15 @@ Decoding functions for timestamps data (spike times). The first argument is always a tuning curves object. """ +import warnings + import numpy as np import xarray as xr from .. import core as nap -def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): - """ - Perform Bayesian decoding over a one dimensional feature. - See: - Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. - (1998). Interpreting neuronal population activity by - reconstruction: unified framework with application to - hippocampal place cells. Journal of neurophysiology, 79(2), - 1017-1044. - - Parameters - ---------- - tuning_curves : pandas.DataFrame - Each column is the tuning curve of one neuron relative to the feature. - Index should be the center of the bin. - group : TsGroup, TsdFrame or dict of Ts/Tsd object. - A group of neurons with the same index as tuning curves column names. - You may also pass a TsdFrame with smoothed rates (recommended). - ep : IntervalSet - The epoch on which decoding is computed - bin_size : float - Bin size. Default is second. 
Use the parameter time_units to change it. - time_units : str, optional - Time unit of the bin size ('s' [default], 'ms', 'us'). - feature : Tsd, optional - The 1d feature used to compute the tuning curves. Used to correct for occupancy. - If feature is not passed, the occupancy is uniform. - - Returns - ------- - Tsd - The decoded feature - TsdFrame - The probability distribution of the decoded feature for each time bin - - Raises - ------ - RuntimeError - If group is not a dict of Ts/Tsd or TsGroup. - If different size of neurons for tuning_curves and group. - If indexes don't match between tuning_curves and group. - """ - if isinstance(group, nap.TsdFrame): - newgroup = group.restrict(ep) - - if tuning_curves.shape[1] != newgroup.shape[1]: - raise RuntimeError("Different shapes for tuning_curves and group") - - if not np.all(tuning_curves.columns.values == np.array(newgroup.columns)): - raise RuntimeError("Different indices for tuning curves and group keys") - - count = group - - elif isinstance(group, nap.TsGroup): - newgroup = group.restrict(ep) - - if tuning_curves.shape[1] != len(newgroup): - raise RuntimeError("Different shapes for tuning_curves and group") - - if not np.all(tuning_curves.columns.values == np.array(newgroup.keys())): - raise RuntimeError("Different indices for tuning curves and group keys") - - # Bin spikes - count = newgroup.count(bin_size, ep, time_units) - - elif isinstance(group, dict): - newgroup = nap.TsGroup(group, time_support=ep) - count = newgroup.count(bin_size, ep, time_units) - - else: - raise RuntimeError("Unknown format for group") - - # Occupancy - if feature is None: - occupancy = np.ones(tuning_curves.shape[0]) - elif isinstance(feature, nap.Tsd): - diff = np.diff(tuning_curves.index.values) - bins = tuning_curves.index.values[:-1] - diff / 2 - bins = np.hstack( - (bins, [bins[-1] + diff[-1], bins[-1] + 2 * diff[-1]]) - ) # assuming the size of the last 2 bins is equal - occupancy, _ = np.histogram(feature.values, bins) - 
else: - raise RuntimeError("Unknown format for feature in decode_1d") - - # Transforming to pure numpy array - tc = tuning_curves.values - ct = count.values - - bin_size_s = nap.TsIndex.format_timestamps( - np.array([bin_size], dtype=np.float64), time_units - )[0] - - p1 = np.exp(-bin_size_s * tc.sum(1)) - p2 = occupancy / occupancy.sum() - - ct2 = np.tile(ct[:, np.newaxis, :], (1, tc.shape[0], 1)) - - p3 = np.prod(tc**ct2, -1) - - p = p1 * p2 * p3 - p = p / p.sum(1)[:, np.newaxis] - - idxmax = np.argmax(p, 1) - - p = nap.TsdFrame( - t=count.index, d=p, time_support=ep, columns=tuning_curves.index.values - ) - - decoded = nap.Tsd( - t=count.index, d=tuning_curves.index.values[idxmax], time_support=ep - ) - - return decoded, p - - -def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): - """ - Performs Bayesian decoding over 2 dimensional features. - - See: - Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. - (1998). Interpreting neuronal population activity by - reconstruction: unified framework with application to - hippocampal place cells. Journal of neurophysiology, 79(2), - 1017-1044. - - Parameters - ---------- - tuning_curves : dict - Dictionary of 2d tuning curves (one for each neuron). - group : TsGroup, TsdFrame or dict of Ts/Tsd object. - A group of neurons with the same keys as tuning_curves dictionary. - You may also pass a TsdFrame with smoothed rates (recommended). - ep : IntervalSet - The epoch on which decoding is computed - bin_size : float - Bin size. Default is second. Use the parameter time_units to change it. - xy : tuple - A tuple of bin positions for the tuning curves i.e. xy=(x,y) - time_units : str, optional - Time unit of the bin size ('s' [default], 'ms', 'us'). - features : TsdFrame - The 2 columns features used to compute the tuning curves. Used to correct for occupancy. - If feature is not passed, the occupancy is uniform. 
- - Returns - ------- - Tsd - The decoded feature in 2d - numpy.ndarray - The probability distribution of the decoded trajectory for each time bin - - Raises - ------ - RuntimeError - If group is not a dict of Ts/Tsd or TsGroup. - If different size of neurons for tuning_curves and group. - If indexes don't match between tuning_curves and group. - - """ - - if type(group) is nap.TsdFrame: - newgroup = group.restrict(ep) - numcells = newgroup.shape[1] - - if len(tuning_curves) != numcells: - raise RuntimeError("Different shapes for tuning_curves and group") - - if not np.all( - np.array(list(tuning_curves.keys())) == np.array(newgroup.columns) - ): - raise RuntimeError("Different indices for tuning curves and group keys") - - count = group - - elif type(group) is nap.TsGroup: - newgroup = group.restrict(ep) - numcells = len(newgroup) - - if len(tuning_curves) != numcells: - raise RuntimeError("Different shapes for tuning_curves and group") - - if not np.all( - np.array(list(tuning_curves.keys())) == np.array(newgroup.keys()) - ): - raise RuntimeError("Different indices for tuning curves and group keys") - - count = newgroup.count(bin_size, ep, time_units) - - elif type(group) is dict: - newgroup = nap.TsGroup(group, time_support=ep) - count = newgroup.count(bin_size, ep, time_units) - - else: - raise RuntimeError("Unknown format for group") - - indexes = list(tuning_curves.keys()) - - # Occupancy - if features is None: - occupancy = np.ones_like(tuning_curves[indexes[0]]).flatten() - else: - binsxy = [] - for i in range(len(xy)): - diff = np.diff(xy[i]) - bins = xy[i][:-1] - diff / 2 - bins = np.hstack( - (bins, [bins[-1] + diff[-1], bins[-1] + 2 * diff[-1]]) - ) # assuming the size of the last 2 bins is equal - binsxy.append(bins) - - occupancy, _, _ = np.histogram2d( - features[:, 0].values, features[:, 1].values, [binsxy[0], binsxy[1]] - ) - occupancy = occupancy.flatten() - - # Transforming to pure numpy array - tc = np.array([tuning_curves[i] for i in 
tuning_curves.keys()]) - tc = tc.reshape(tc.shape[0], np.prod(tc.shape[1:])) - tc = tc.T - ct = count.values - bin_size_s = nap.TsIndex.format_timestamps( - np.array([bin_size], dtype=np.float64), time_units - )[0] - - p1 = np.exp(-bin_size_s * np.nansum(tc, 1)) - p2 = occupancy / occupancy.sum() - - ct2 = np.tile(ct[:, np.newaxis, :], (1, tc.shape[0], 1)) - - p3 = np.nanprod(tc**ct2, -1) - - p = p1 * p2 * p3 - p = p / p.sum(1)[:, np.newaxis] - - idxmax = np.argmax(p, 1) - - p = p.reshape(p.shape[0], len(xy[0]), len(xy[1])) - - idxmax2d = np.unravel_index(idxmax, (len(xy[0]), len(xy[1]))) - - if features is not None: - cols = features.columns - else: - cols = np.arange(2) - - decoded = nap.TsdFrame( - t=count.index, - d=np.vstack((xy[0][idxmax2d[0]], xy[1][idxmax2d[1]])).T, - time_support=ep, - columns=cols, - ) - - return decoded, p - - -def decode(tuning_curves, group, epochs, bin_size, time_units="s"): +def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy=False): """ Performs Bayesian decoding over n-dimensional features. @@ -289,21 +34,17 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): Bin size. Default is second. Use the parameter time_units to change it. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). + use_occupancy : bool, optional + If True, uses the occupancy from the tuning curves to adjust the probability distribution. + If False, uses a uniform distribution. Default is False. Returns ------- Tsd The decoded feature - numpy.ndarray + TsdFrame, TsdTensor The probability distribution of the decoded trajectory for each time bin - Raises - ------ - RuntimeError - If group is not a dict of Ts/Tsd or TsGroup. - If different size of neurons for tuning_curves and group. - If indexes don't match between tuning_curves and group. 
- """ # check tuning curves @@ -317,10 +58,10 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): numcells = len(group) if tuning_curves.sizes["unit"] != numcells: - raise RuntimeError("Different shapes for tuning_curves and group") + raise ValueError("Different shapes for tuning_curves and group") if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): - raise RuntimeError("Different indices for tuning curves and group keys") + raise ValueError("Different indices for tuning curves and group keys") if isinstance(group, dict): group = nap.TsGroup(group, time_support=epochs) @@ -329,17 +70,24 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): numcells = group.shape[1] if tuning_curves.sizes["unit"] != numcells: - raise RuntimeError("Different shapes for tuning_curves and group") + raise ValueError("Different shapes for tuning_curves and group") if not np.all(tuning_curves.coords["unit"] == group.columns): - raise RuntimeError("Different indices for tuning curves and group keys") + raise ValueError("Different indices for tuning curves and group keys") count = group else: - raise RuntimeError("Unknown format for group") - - if "occupancy" in tuning_curves.dims: - occupancy = tuning_curves.coords["occupancy"].values.flatten() + raise TypeError("Unknown format for group.") + + if use_occupancy: + if "occupancy" not in tuning_curves.attrs: + raise ValueError( + "use_occupancy set to True but no occupancy found in tuning curves" + ) + occupancy = tuning_curves.attrs["occupancy"] + if occupancy.shape != tuning_curves.shape[1:]: + raise ValueError("Occupancy shape does not match tuning curves shape.") + occupancy = occupancy.flatten() else: occupancy = np.ones_like(tuning_curves[0]).flatten() @@ -392,3 +140,76 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s"): ) return decoded, p + + +def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): + warnings.warn( + "decode_1d is 
deprecated and will be removed in v1.0; use decode instead.", + DeprecationWarning, + stacklevel=2, + ) + # Occupancy + if feature is None: + occupancy = np.ones(tuning_curves.shape[0]) + elif isinstance(feature, nap.Tsd): + diff = np.diff(tuning_curves.index.values) + bins = tuning_curves.index.values[:-1] - diff / 2 + bins = np.hstack( + (bins, [bins[-1] + diff[-1], bins[-1] + 2 * diff[-1]]) + ) # assuming the size of the last 2 bins is equal + occupancy, _ = np.histogram(feature.values, bins) + else: + raise RuntimeError("Unknown format for feature in decode_1d") + return decode( + xr.DataArray( + data=tuning_curves.values, + coords={ + "unit": tuning_curves.columns.values, + "0": tuning_curves.index.values, + }, + attrs={"occupancy": occupancy}, + ), + group, + ep, + bin_size, + time_units=time_units, + use_occupancy=feature is not None, + ) + + +def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): + warnings.warn( + "decode_2d is deprecated and will be removed in v1.0; use decode instead.", + DeprecationWarning, + stacklevel=2, + ) + # Occupancy + indexes = list(tuning_curves.keys()) + if features is None: + occupancy = np.ones_like(tuning_curves[indexes[0]]).flatten() + else: + binsxy = [] + for i in range(len(xy)): + diff = np.diff(xy[i]) + bins = xy[i][:-1] - diff / 2 + bins = np.hstack( + (bins, [bins[-1] + diff[-1], bins[-1] + 2 * diff[-1]]) + ) # assuming the size of the last 2 bins is equal + binsxy.append(bins) + + occupancy, _, _ = np.histogram2d( + features[:, 0].values, features[:, 1].values, [binsxy[0], binsxy[1]] + ) + occupancy = occupancy.flatten() + return decode( + xr.DataArray( + data=[tuning_curves[i] for i in indexes], + coords={"unit": indexes, "0": xy[0], "1": xy[1]}, + attrs={"occupancy": occupancy}, + ), + group, + ep, + bin_size, + time_units=time_units, + use_occupancy=features is not None, + ) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index a323785a4..95abb200e 100644 --- 
a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -1,32 +1,260 @@ -# -*- coding: utf-8 -*- -# @Author: gviejo -# @Date: 2022-03-30 11:16:39 -# @Last Modified by: Guillaume Viejo -# @Last Modified time: 2024-01-29 11:15:41 -#!/usr/bin/env python - """Tests of decoding for `pynapple` package.""" +from contextlib import nullcontext as does_not_raise + import numpy as np import pytest -import xarray as xr import pynapple as nap +def get_testing_set_n(n_units=1, n_features=1, binned=False): + features = nap.TsdFrame( + t=np.arange(0, 100, 1), + d=np.stack([np.repeat(np.arange(0, 2), 50) for _ in range(n_features)], axis=1), + ) + group = nap.TsGroup( + {i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(n_units)} + ) + if binned: + group = group.count(bin_size=1) + tc = nap.compute_tuning_curves( + group=group, features=features, bins=2, range=[(-0.5, 1.5)] * n_features + ) + epochs = nap.IntervalSet(start=0, end=100) + return {"tuning_curves": tc, "group": group, "epochs": epochs, "bin_size": 1} + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "overwrite_default_args, expectation", + [ + # tuning_curves + ( + {"tuning_curves": []}, + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + ), + ), + ( + {"tuning_curves": 1}, + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + ), + ), + ( + {"tuning_curves": get_testing_set_n()["tuning_curves"].to_pandas().T}, + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + ), + ), + ( + {"tuning_curves": get_testing_set_n(n_units=2)["tuning_curves"]}, + pytest.raises( + ValueError, + match="Different shapes for tuning_curves and group", + ), + ), + ( + { + "tuning_curves": get_testing_set_n(n_units=1)[ + "tuning_curves" + ].assign_coords(unit=[3]) + }, + pytest.raises( + ValueError, + match="Different 
indices for tuning curves and group keys", + ), + ), + ({}, does_not_raise()), + (get_testing_set_n(1, 2), does_not_raise()), + (get_testing_set_n(1, 3), does_not_raise()), + (get_testing_set_n(2, 1), does_not_raise()), + (get_testing_set_n(2, 2), does_not_raise()), + (get_testing_set_n(2, 3), does_not_raise()), + (get_testing_set_n(3, 1), does_not_raise()), + (get_testing_set_n(3, 2), does_not_raise()), + (get_testing_set_n(3, 3), does_not_raise()), + # group + ( + {"group": []}, + pytest.raises( + TypeError, + match="Unknown format for group.", + ), + ), + ( + {"group": 1}, + pytest.raises( + TypeError, + match="Unknown format for group.", + ), + ), + ( + {"group": get_testing_set_n(2)["group"]}, + pytest.raises( + ValueError, + match="Different shapes for tuning_curves and group", + ), + ), + ( + {"group": nap.TsGroup({2: nap.Ts(t=np.arange(0, 50))})}, + pytest.raises( + ValueError, + match="Different indices for tuning curves and group keys", + ), + ), + ( + {"group": get_testing_set_n(binned=True)["group"]}, + does_not_raise(), + ), + ( + get_testing_set_n(2, binned=True), + does_not_raise(), + ), + ( + get_testing_set_n(3, binned=True), + does_not_raise(), + ), + # use_occupancy + ( + { + "use_occupancy": True, + "tuning_curves": (lambda x: (x.attrs.clear(), x)[1])( + get_testing_set_n()["tuning_curves"] + ), + }, + pytest.raises( + ValueError, + match="use_occupancy set to True but no occupancy found in tuning curves", + ), + ), + ( + { + "use_occupancy": True, + "tuning_curves": get_testing_set_n(1)["tuning_curves"].assign_attrs( + {"occupancy": np.array([1, 2, 3])} + ), + }, + pytest.raises( + ValueError, + match="Occupancy shape does not match tuning curves shape.", + ), + ), + ( + {"use_occupancy": True}, + does_not_raise(), + ), + ], +) +def test_decode_type_errors(overwrite_default_args, expectation): + default_args = get_testing_set_n(1) + default_args.update(overwrite_default_args) + with expectation: + nap.decode(**default_args) + + +# def 
test_decode_1d(): +# feature, group, tc, epochs = get_testing_set_n(1) +# decoded, proba = nap.decode(tc, group, epochs, bin_size=1) +# assert isinstance(decoded, nap.Tsd) +# assert isinstance(proba, nap.TsdFrame) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = np.ones((100, 2)) +# tmp[50:, 0] = 0.0 +# tmp[0:50, 1] = 0.0 +# np.testing.assert_array_almost_equal(proba.values, tmp) +# +# +# def test_decode_1d_with_TsdFrame(): +# feature, group, tc, epochs = get_testing_set_n(1) +# count = group.count(bin_size=1, ep=epochs) +# decoded, proba = nap.decode(tc, count, epochs, bin_size=1) +# assert isinstance(decoded, nap.Tsd) +# assert isinstance(proba, nap.TsdFrame) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = np.ones((100, 2)) +# tmp[50:, 0] = 0.0 +# tmp[0:50, 1] = 0.0 +# np.testing.assert_array_almost_equal(proba.values, tmp) +# +# +# def test_decode_1d_with_occupancy(): +# feature, group, tc, epochs = get_testing_set_n(1) +# decoded, proba = nap.decode(tc, group, epochs, bin_size=1, use_occupancy=True) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# assert isinstance(decoded, nap.Tsd) +# assert isinstance(proba, nap.TsdFrame) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# +# +# def test_decode_1d_with_dict(): +# feature, group, tc, epochs = get_testing_set_n(1) +# group = dict(group) +# decoded, proba = nap.decode(tc, group, epochs, bin_size=1) +# assert isinstance(decoded, nap.Tsd) +# assert isinstance(proba, nap.TsdFrame) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# assert len(decoded) == 100 +# assert len(proba) == 100 +# tmp = np.ones((100, 2)) +# tmp[50:, 0] = 0.0 +# tmp[0:50, 1] = 0.0 +# np.testing.assert_array_almost_equal(proba.values, tmp) +# +# +# def test_decode_1d_with_time_units(): +# 
feature, group, tc, epochs = get_testing_set_n(1) +# for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): +# decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) +# np.testing.assert_array_almost_equal(feature.values, decoded.values) +# +# +# def test_decoded_1d_raise_errors(): +# feature, group, tc, epochs = get_testing_set_n(1) +# with pytest.raises(Exception) as e_info: +# nap.decode(tc, np.random.rand(10), epochs, 1) +# assert str(e_info.value) == "Unknown format for group" +# +# feature, group, tc, epochs = get_testing_set_n(1) +# _tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) +# with pytest.raises(Exception) as e_info: +# nap.decode(_tc, group, epochs, 1) +# assert str(e_info.value) == "Different shapes for tuning_curves and group" +# +# feature, group, tc, epochs = get_testing_set_n(1) +# tc.coords["unit"] = [0, 2] +# with pytest.raises(Exception) as e_info: +# nap.decode(tc, group, epochs, 1) +# assert str(e_info.value) == "Different indices for tuning curves and group keys" + +# ------------------------------------------------------------------------------------ +# OLD DECODING TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.filterwarnings("ignore") def get_testing_set_1d(): feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) - tc = nap.compute_tuning_curves( - group=group, features=feature, bins=2, range=(-0.5, 1.5) + tc = nap.compute_1d_tuning_curves( + group=group, feature=feature, nb_bins=2, minmax=(-0.5, 1.5) ) - epochs = nap.IntervalSet(start=0, end=100) - return feature, group, tc, epochs + ep = nap.IntervalSet(start=0, end=100) + return feature, group, tc, ep +@pytest.mark.filterwarnings("ignore") def test_decode_1d(): - feature, group, tc, epochs = get_testing_set_1d() - decoded, proba = nap.decode(tc, group, epochs, bin_size=1) + feature, 
group, tc, ep = get_testing_set_1d() + decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -38,10 +266,27 @@ def test_decode_1d(): np.testing.assert_array_almost_equal(proba.values, tmp) +@pytest.mark.filterwarnings("ignore") def test_decode_1d_with_TsdFrame(): - feature, group, tc, epochs = get_testing_set_1d() - count = group.count(bin_size=1, ep=epochs) - decoded, proba = nap.decode(tc, count, epochs, bin_size=1) + feature, group, tc, ep = get_testing_set_1d() + count = group.count(bin_size=1, ep=ep) + decoded, proba = nap.decode_1d(tc, count, ep, bin_size=1) + assert isinstance(decoded, nap.Tsd) + assert isinstance(proba, nap.TsdFrame) + np.testing.assert_array_almost_equal(feature.values, decoded.values) + assert len(decoded) == 100 + assert len(proba) == 100 + tmp = np.ones((100, 2)) + tmp[50:, 0] = 0.0 + tmp[0:50, 1] = 0.0 + np.testing.assert_array_almost_equal(proba.values, tmp) + + +@pytest.mark.filterwarnings("ignore") +def test_decode_1d_with_feature(): + feature, group, tc, ep = get_testing_set_1d() + decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1, feature=feature) + np.testing.assert_array_almost_equal(feature.values, decoded.values) assert isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -53,10 +298,12 @@ def test_decode_1d_with_TsdFrame(): np.testing.assert_array_almost_equal(proba.values, tmp) +@pytest.mark.filterwarnings("ignore") def test_decode_1d_with_dict(): - feature, group, tc, epochs = get_testing_set_1d() + feature, group, tc, ep = get_testing_set_1d() group = dict(group) - decoded, proba = nap.decode(tc, group, epochs, bin_size=1) + decoded, proba = nap.decode_1d(tc, group, ep, bin_size=1, feature=feature) + np.testing.assert_array_almost_equal(feature.values, decoded.values) assert 
isinstance(decoded, nap.Tsd) assert isinstance(proba, nap.TsdFrame) np.testing.assert_array_almost_equal(feature.values, decoded.values) @@ -68,32 +315,23 @@ def test_decode_1d_with_dict(): np.testing.assert_array_almost_equal(proba.values, tmp) +@pytest.mark.filterwarnings("ignore") +def test_decode_1d_with_wrong_feature(): + feature, group, tc, ep = get_testing_set_1d() + with pytest.raises(RuntimeError) as e_info: + nap.decode_1d(tc, group, ep, bin_size=1, feature=[1, 2, 3]) + assert str(e_info.value) == "Unknown format for feature in decode_1d" + + +@pytest.mark.filterwarnings("ignore") def test_decode_1d_with_time_units(): - feature, group, tc, epochs = get_testing_set_1d() + feature, group, tc, ep = get_testing_set_1d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): - decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) + decoded, proba = nap.decode_1d(tc, group, ep, 1.0 * t, time_units=tu) np.testing.assert_array_almost_equal(feature.values, decoded.values) -def test_decoded_1d_raise_errors(): - feature, group, tc, epochs = get_testing_set_1d() - with pytest.raises(Exception) as e_info: - nap.decode(tc, np.random.rand(10), epochs, 1) - assert str(e_info.value) == "Unknown format for group" - - feature, group, tc, epochs = get_testing_set_1d() - _tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) - with pytest.raises(Exception) as e_info: - nap.decode(_tc, group, epochs, 1) - assert str(e_info.value) == "Different shapes for tuning_curves and group" - - feature, group, tc, epochs = get_testing_set_1d() - tc.coords["unit"] = [0, 2] - with pytest.raises(Exception) as e_info: - nap.decode(tc, group, epochs, 1) - assert str(e_info.value) == "Different indices for tuning curves and group keys" - - +@pytest.mark.filterwarnings("ignore") def get_testing_set_2d(): features = nap.TsdFrame( t=np.arange(0, 100, 1), @@ -108,16 +346,17 @@ def get_testing_set_2d(): } ) - tc = nap.compute_tuning_curves( - group=group, 
features=features, bins=2, range=[(-0.5, 1.5), (-0.5, 1.5)] + tc, xy = nap.compute_2d_tuning_curves( + group=group, features=features, nb_bins=2, minmax=(-0.5, 1.5, -0.5, 1.5) ) - epochs = nap.IntervalSet(start=0, end=100) - return features, group, tc, epochs + ep = nap.IntervalSet(start=0, end=100) + return features, group, tc, ep, tuple(xy) +@pytest.mark.filterwarnings("ignore") def test_decode_2d(): - features, group, tc, epochs = get_testing_set_2d() - decoded, proba = nap.decode(tc, group, epochs, 1) + features, group, tc, ep, xy = get_testing_set_2d() + decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) assert isinstance(decoded, nap.TsdFrame) assert isinstance(proba, nap.TsdTensor) @@ -135,10 +374,11 @@ def test_decode_2d(): np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +@pytest.mark.filterwarnings("ignore") def test_decode_2d_with_TsdFrame(): - features, group, tc, epochs = get_testing_set_2d() - count = group.count(bin_size=1, ep=epochs) - decoded, proba = nap.decode(tc, count, epochs, 1) + features, group, tc, ep, xy = get_testing_set_2d() + count = group.count(bin_size=1, ep=ep) + decoded, proba = nap.decode_2d(tc, count, ep, 1, xy) assert isinstance(decoded, nap.TsdFrame) assert isinstance(proba, nap.TsdTensor) @@ -156,10 +396,11 @@ def test_decode_2d_with_TsdFrame(): np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +@pytest.mark.filterwarnings("ignore") def test_decode_2d_with_dict(): - features, group, tc, epochs = get_testing_set_2d() + features, group, tc, ep, xy = get_testing_set_2d() group = dict(group) - decoded, proba = nap.decode(tc, group, epochs, 1) + decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) assert isinstance(decoded, nap.TsdFrame) assert isinstance(proba, nap.TsdTensor) @@ -177,27 +418,16 @@ def test_decode_2d_with_dict(): np.testing.assert_array_almost_equal(proba[:, :, 1], tmp) +@pytest.mark.filterwarnings("ignore") +def test_decode_2d_with_feature(): + features, group, tc, ep, xy = get_testing_set_2d() + 
decoded, proba = nap.decode_2d(tc, group, ep, 1, xy) + np.testing.assert_array_almost_equal(features.values, decoded.values) + + +@pytest.mark.filterwarnings("ignore") def test_decode_2d_with_time_units(): - features, group, tc, epochs = get_testing_set_2d() + features, group, tc, ep, xy = get_testing_set_2d() for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): - decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) + decoded, proba = nap.decode_2d(tc, group, ep, 1.0 * t, xy, time_units=tu) np.testing.assert_array_almost_equal(features.values, decoded.values) - - -def test_decoded_2d_raise_errors(): - features, group, tc, epochs = get_testing_set_2d() - with pytest.raises(Exception) as e_info: - nap.decode(tc, np.random.rand(10), epochs, 1) - assert str(e_info.value) == "Unknown format for group" - - features, group, tc, epochs = get_testing_set_2d() - tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) - with pytest.raises(Exception) as e_info: - nap.decode(tc, group, epochs, 1) - assert str(e_info.value) == "Different shapes for tuning_curves and group" - - features, group, tc, epochs = get_testing_set_2d() - tc.coords["unit"] = [0, 2, 4, 6] - with pytest.raises(Exception) as e_info: - nap.decode(tc, group, epochs, 1) - assert str(e_info.value) == "Different indices for tuning curves and group keys" From eab4737354b69b9f8ec66905cffc4c62d98f1e8f Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 17:28:21 +0000 Subject: [PATCH 065/244] swap tuning curve notebook order --- doc/user_guide/06_tuning_curves.md | 44 ++++++++++++++---------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 7b672fd99..38736cb27 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -41,29 +41,7 @@ group = { tsgroup = nap.TsGroup(group) ``` - -## From epochs - -When computing from epochs, you should store them 
in a dictionary: - - -```{code-cell} ipython3 -dict_ep = { - "stim0": nap.IntervalSet(start=0, end=20), - "stim1":nap.IntervalSet(start=30, end=70) -} -``` - -[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. -The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. -The output will be the mean firing rate of the neuron during this set of intervals. - -```{code-cell} ipython3 -mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) -print(mean_fr) -``` - -## From timestamps +## From timestamps or continuous activity ```{code-cell} ipython3 :tags: [hide-cell] @@ -355,3 +333,23 @@ tuning_curves_2d.attrs["unit"]="a.u." tuning_curves_2d.plot(col="unit") plt.show() ``` + +## From epochs + +When computing from epochs, you should store them in a dictionary: + +```{code-cell} ipython3 +dict_ep = { + "stim0": nap.IntervalSet(start=0, end=20), + "stim1":nap.IntervalSet(start=30, end=70) +} +``` + +[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. +The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. +The output will be the mean firing rate of the neuron during this set of intervals. 
+ +```{code-cell} ipython3 +mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) +print(mean_fr) +``` From 9dbba773eec3ddbe285ff1a21f199d1069b9e509 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Fri, 18 Jul 2025 13:34:47 -0400 Subject: [PATCH 066/244] check all methods --- scripts/check_parameter_naming.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 164c15f5b..29c378092 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -84,10 +84,11 @@ def walk(obj, path_prefix=""): elif inspect.isclass(obj): if getattr(obj, "__module__", "").startswith(root_name): - for name, member in inspect.getmembers(obj): - if name.startswith("_"): - continue - walk(member, f"{path_prefix}.{name}") + for attr in inspect.classify_class_attrs(obj): + # attrs is a convenient named tuple with fields + # (name, kind, defining_class, object) + if attr.kind == "method": + process_function(attr.object, f"{path_prefix}.{attr.name}") elif isinstance(obj, types.ModuleType): if not getattr(obj, "__name__", "").startswith(root_name): From 17c47f51dee04657b5e4193e0a409f6513493a3e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 20:30:16 +0000 Subject: [PATCH 067/244] update notebooks --- doc/examples/tutorial_HD_dataset.md | 16 +-- doc/user_guide/07_decoding.md | 43 +++--- pynapple/process/decoding.py | 12 +- tests/test_decoding.py | 199 ++++++++++++---------------- 4 files changed, 117 insertions(+), 153 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index c1b59f830..ffb4f4481 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -183,17 +183,11 @@ To decode the population activity, we will be using a bayesian decoder as implem Again, just a single line of code! 
```{code-cell} ipython3 -print(tuning_curves.to_pandas().T) -print(spikes_adn) -``` - -```{code-cell} ipython3 -decoded, proba_feature = nap.decode_1d( - tuning_curves=tuning_curves.to_pandas().T, +decoded, proba_feature = nap.decode( + tuning_curves=tuning_curves, group=spikes_adn, - ep=epochs[epochs.tags == "wake"], - bin_size=0.1, # second - feature=angle, + epochs=epochs[epochs.tags == "wake"], + bin_size=0.1, ) ``` @@ -203,7 +197,7 @@ What does this look like? print(decoded) ``` -The variable 'decoded' contains the most probable angle, and 'proba_feature' that contains the probability of a given angular bin at a given time point: +The variable 'decoded' contains the most probable angle, and 'proba_feature' contains the probability of a given angular bin at a given time point: ```{code-cell} ipython3 print(proba_feature) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 58bffd738..c196f3652 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -63,7 +63,7 @@ index = np.digitize(feature, bins)-1 count = np.random.poisson(tc[index])>0 tsgroup = nap.TsGroup({i:nap.Ts(timestep[count[:,i]]) for i in range(N)}) -epoch = nap.IntervalSet(0, 10) +epochs = nap.IntervalSet(0, 10) ``` To decode, we need to compute tuning curves in 1D. 
@@ -74,7 +74,7 @@ tuning_curves_1d = nap.compute_tuning_curves( ) ``` -We can display the tuning curves of each neurons +We can display the tuning curve of each neuron: ```{code-cell} ipython3 :tags: [hide-input] @@ -84,31 +84,31 @@ tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.show() ``` -`nap.decode_1d` performs bayesian decoding: +`nap.decode` performs bayesian decoding: ```{code-cell} ipython3 -decoded, proba_feature = nap.decode_1d( - tuning_curves=tuning_curves_1d.to_pandas().T, # 1D tuning curves +decoded, proba_feature = nap.decode( + tuning_curves=tuning_curves_1d, # 1D tuning curves group=tsgroup, # Spiking activity - ep=epoch, # Small epoch + epochs=epochs, # Small epoch bin_size=0.06, # How to bin the spike trains - feature=feature, # Features ) ``` -`decoded` is `Tsd` object containing the decoded feature value. `proba_feature` is a `TsdFrame` containing the probabilities of being in a particular feature bin over time. +`decoded` is a `Tsd` object containing the decoded feature value. +`proba_feature` is a `TsdFrame` containing the probabilities of being in a particular feature bin over time. 
```{code-cell} ipython3 :tags: [hide-input] plt.figure(figsize=(12, 6)) plt.subplot(211) -plt.plot(feature.restrict(epoch), label="True") +plt.plot(feature.restrict(epochs), label="True") plt.plot(decoded, label="Decoded") plt.legend() -plt.xlim(epoch[0,0], epoch[0,1]) +plt.xlim(epochs[0,0], epochs[0,1]) plt.subplot(212) plt.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") -plt.xticks([0, len(decoded)], epoch.values[0]) +plt.xticks([0, len(decoded)], epochs.values[0]) plt.xlabel("Time (s)") plt.show() ``` @@ -123,7 +123,7 @@ features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, features = nap.TsdFrame(t=np.arange(0, 1000, dt), d=features, time_units="s", - time_support=epoch, + time_support=epochs, columns=["a", "b"], ) @@ -137,10 +137,10 @@ for i in range(12): ts = times[(alpha >= bins[i, 0]) & (alpha <= bins[i + 1, 1])] ts_group[i] = nap.Ts(ts, time_units="us") -ts_group = nap.TsGroup(ts_group, time_support=epoch) +ts_group = nap.TsGroup(ts_group, time_support=epochs) ``` -To decode, we need to compute tuning curves in 2D. 
+To decode two dimensions, we need to compute tuning curves in 2D: ```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( @@ -152,7 +152,7 @@ tuning_curves_2d = nap.compute_tuning_curves( ) ``` -We can display the tuning curves of each neuron +We can again display the tuning curve of each neuron: ```{code-cell} ipython3 :tags: [hide-input] @@ -162,19 +162,14 @@ tuning_curves_2d.plot(row="unit", col_wrap=6) plt.show() ``` -`nap.decode_2d` performs bayesian decoding: +and `nap.decode` again performs bayesian decoding: ```{code-cell} ipython3 -tcs = {c: tuning_curves_2d.sel(unit=c).values for c in tuning_curves_2d.coords["unit"].values} -bins = [tuning_curves_2d.coords[dim].values for dim in tuning_curves_2d.coords if dim != "unit"] - -decoded, proba_feature = nap.decode_2d( - tuning_curves=tcs, # 2D tuning curves +decoded, proba_feature = nap.decode( + tuning_curves=tuning_curves_2d, # 2D tuning curves group=ts_group, # Spiking activity - ep=epoch, # Epoch + epochs=epochs, # Epoch bin_size=0.1, # How to bin the spike trains - xy=bins, # Features position - features=features, # Features ) ``` diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index ac2c4110f..aa3955fc1 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -50,7 +50,7 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy # check tuning curves if not isinstance(tuning_curves, xr.DataArray): raise TypeError( - "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves" + "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." 
) # check group @@ -58,10 +58,10 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy numcells = len(group) if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and group") + raise ValueError("Different shapes for tuning_curves and group.") if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): - raise ValueError("Different indices for tuning curves and group keys") + raise ValueError("Different indices for tuning curves and group keys.") if isinstance(group, dict): group = nap.TsGroup(group, time_support=epochs) @@ -70,10 +70,10 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy numcells = group.shape[1] if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and group") + raise ValueError("Different shapes for tuning_curves and group.") if not np.all(tuning_curves.coords["unit"] == group.columns): - raise ValueError("Different indices for tuning curves and group keys") + raise ValueError("Different indices for tuning curves and group keys.") count = group else: @@ -82,7 +82,7 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy if use_occupancy: if "occupancy" not in tuning_curves.attrs: raise ValueError( - "use_occupancy set to True but no occupancy found in tuning curves" + "use_occupancy set to True but no occupancy found in tuning curves." 
) occupancy = tuning_curves.attrs["occupancy"] if occupancy.shape != tuning_curves.shape[1:]: diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 95abb200e..e9d16a1fc 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -1,6 +1,7 @@ """Tests of decoding for `pynapple` package.""" from contextlib import nullcontext as does_not_raise +from itertools import product import numpy as np import pytest @@ -8,21 +9,39 @@ import pynapple as nap -def get_testing_set_n(n_units=1, n_features=1, binned=False): - features = nap.TsdFrame( - t=np.arange(0, 100, 1), - d=np.stack([np.repeat(np.arange(0, 2), 50) for _ in range(n_features)], axis=1), - ) - group = nap.TsGroup( - {i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(n_units)} - ) +def get_testing_set_n(n_features=1, binned=False): + combos = np.array(list(product([0, 1], repeat=n_features))) # (2^F, F) + reps = 5 + feature_data = np.tile(combos, (reps, 1)) # (T, F) + times = np.arange(len(feature_data)) + + features = nap.TsdFrame(t=times, d=feature_data) + epochs = nap.IntervalSet(start=0, end=len(times)) + + group = { + i: nap.Ts(t=times[np.all(feature_data == combo, axis=1)]) + for i, combo in enumerate(combos) + } + if binned: - group = group.count(bin_size=1) - tc = nap.compute_tuning_curves( - group=group, features=features, bins=2, range=[(-0.5, 1.5)] * n_features + group = nap.TsGroup(group).count(bin_size=1, ep=epochs) + group = nap.TsdFrame( + group.times() - 0.5, + group.values, + time_support=epochs, + ) + + tuning_curves = nap.compute_tuning_curves( + group, features, bins=2, range=[(-0.5, 1.5)] * n_features ) - epochs = nap.IntervalSet(start=0, end=100) - return {"tuning_curves": tc, "group": group, "epochs": epochs, "bin_size": 1} + + return { + "features": features, + "tuning_curves": tuning_curves, + "group": group, + "epochs": epochs, + "bin_size": 1, + } @pytest.mark.filterwarnings("ignore") @@ -34,50 +53,44 @@ def get_testing_set_n(n_units=1, n_features=1, 
binned=False): {"tuning_curves": []}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", ), ), ( {"tuning_curves": 1}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", ), ), ( {"tuning_curves": get_testing_set_n()["tuning_curves"].to_pandas().T}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves", + match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", ), ), ( - {"tuning_curves": get_testing_set_n(n_units=2)["tuning_curves"]}, + {"tuning_curves": get_testing_set_n(2)["tuning_curves"]}, pytest.raises( ValueError, - match="Different shapes for tuning_curves and group", + match="Different shapes for tuning_curves and group.", ), ), ( { - "tuning_curves": get_testing_set_n(n_units=1)[ - "tuning_curves" - ].assign_coords(unit=[3]) + "tuning_curves": get_testing_set_n()["tuning_curves"].assign_coords( + unit=[2, 3] + ) }, pytest.raises( ValueError, - match="Different indices for tuning curves and group keys", + match="Different indices for tuning curves and group keys.", ), ), ({}, does_not_raise()), - (get_testing_set_n(1, 2), does_not_raise()), - (get_testing_set_n(1, 3), does_not_raise()), - (get_testing_set_n(2, 1), does_not_raise()), - (get_testing_set_n(2, 2), does_not_raise()), - (get_testing_set_n(2, 3), does_not_raise()), - (get_testing_set_n(3, 1), does_not_raise()), - (get_testing_set_n(3, 2), does_not_raise()), - (get_testing_set_n(3, 3), does_not_raise()), + (get_testing_set_n(1), does_not_raise()), + (get_testing_set_n(2), does_not_raise()), # group ( {"group": []}, @@ -97,16 +110,24 @@ def get_testing_set_n(n_units=1, n_features=1, binned=False): {"group": 
get_testing_set_n(2)["group"]}, pytest.raises( ValueError, - match="Different shapes for tuning_curves and group", + match="Different shapes for tuning_curves and group.", ), ), ( - {"group": nap.TsGroup({2: nap.Ts(t=np.arange(0, 50))})}, + { + "group": nap.TsGroup( + {2: nap.Ts(t=np.arange(0, 50)), 3: nap.Ts(t=np.arange(0, 50))} + ) + }, pytest.raises( ValueError, - match="Different indices for tuning curves and group keys", + match="Different indices for tuning curves and group keys.", ), ), + ( + {"group": nap.TsGroup(get_testing_set_n()["group"])}, + does_not_raise(), + ), ( {"group": get_testing_set_n(binned=True)["group"]}, does_not_raise(), @@ -129,7 +150,7 @@ def get_testing_set_n(n_units=1, n_features=1, binned=False): }, pytest.raises( ValueError, - match="use_occupancy set to True but no occupancy found in tuning curves", + match="use_occupancy set to True but no occupancy found in tuning curves.", ), ), ( @@ -151,89 +172,43 @@ def get_testing_set_n(n_units=1, n_features=1, binned=False): ], ) def test_decode_type_errors(overwrite_default_args, expectation): - default_args = get_testing_set_n(1) + default_args = get_testing_set_n() default_args.update(overwrite_default_args) + default_args.pop("features") with expectation: nap.decode(**default_args) -# def test_decode_1d(): -# feature, group, tc, epochs = get_testing_set_n(1) -# decoded, proba = nap.decode(tc, group, epochs, bin_size=1) -# assert isinstance(decoded, nap.Tsd) -# assert isinstance(proba, nap.TsdFrame) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.ones((100, 2)) -# tmp[50:, 0] = 0.0 -# tmp[0:50, 1] = 0.0 -# np.testing.assert_array_almost_equal(proba.values, tmp) -# -# -# def test_decode_1d_with_TsdFrame(): -# feature, group, tc, epochs = get_testing_set_n(1) -# count = group.count(bin_size=1, ep=epochs) -# decoded, proba = nap.decode(tc, count, epochs, bin_size=1) -# assert isinstance(decoded, 
nap.Tsd) -# assert isinstance(proba, nap.TsdFrame) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.ones((100, 2)) -# tmp[50:, 0] = 0.0 -# tmp[0:50, 1] = 0.0 -# np.testing.assert_array_almost_equal(proba.values, tmp) -# -# -# def test_decode_1d_with_occupancy(): -# feature, group, tc, epochs = get_testing_set_n(1) -# decoded, proba = nap.decode(tc, group, epochs, bin_size=1, use_occupancy=True) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# assert isinstance(decoded, nap.Tsd) -# assert isinstance(proba, nap.TsdFrame) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# -# -# def test_decode_1d_with_dict(): -# feature, group, tc, epochs = get_testing_set_n(1) -# group = dict(group) -# decoded, proba = nap.decode(tc, group, epochs, bin_size=1) -# assert isinstance(decoded, nap.Tsd) -# assert isinstance(proba, nap.TsdFrame) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# assert len(decoded) == 100 -# assert len(proba) == 100 -# tmp = np.ones((100, 2)) -# tmp[50:, 0] = 0.0 -# tmp[0:50, 1] = 0.0 -# np.testing.assert_array_almost_equal(proba.values, tmp) -# -# -# def test_decode_1d_with_time_units(): -# feature, group, tc, epochs = get_testing_set_n(1) -# for t, tu in zip([1, 1e3, 1e6], ["s", "ms", "us"]): -# decoded, proba = nap.decode(tc, group, epochs, 1.0 * t, time_units=tu) -# np.testing.assert_array_almost_equal(feature.values, decoded.values) -# -# -# def test_decoded_1d_raise_errors(): -# feature, group, tc, epochs = get_testing_set_n(1) -# with pytest.raises(Exception) as e_info: -# nap.decode(tc, np.random.rand(10), epochs, 1) -# assert str(e_info.value) == "Unknown format for group" -# -# feature, group, tc, epochs = get_testing_set_n(1) -# _tc = xr.DataArray(data=np.random.rand(10, 3), dims=["time", "unit"]) -# with pytest.raises(Exception) as e_info: -# nap.decode(_tc, group, epochs, 1) -# 
assert str(e_info.value) == "Different shapes for tuning_curves and group" -# -# feature, group, tc, epochs = get_testing_set_n(1) -# tc.coords["unit"] = [0, 2] -# with pytest.raises(Exception) as e_info: -# nap.decode(tc, group, epochs, 1) -# assert str(e_info.value) == "Different indices for tuning curves and group keys" +@pytest.mark.parametrize("use_occupancy", [True, False]) +@pytest.mark.parametrize("n_features", [1, 2, 3]) +@pytest.mark.parametrize("binned", [True, False]) +def test_decode(n_features, binned, use_occupancy): + features, tuning_curves, group, epochs, bin_size = get_testing_set_n( + n_features, binned=binned + ).values() + decoded, proba = nap.decode( + tuning_curves=tuning_curves, + group=group, + epochs=epochs, + bin_size=bin_size, + time_units="s", + use_occupancy=use_occupancy, + ) + + assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) + np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) + + assert isinstance( + proba, + nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, + ) + expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) + target_indices = [np.arange(len(features))] + [ + features[:, d] for d in range(features.shape[1]) + ] + expected_proba[tuple(target_indices)] = 1.0 + np.testing.assert_array_almost_equal(proba.values, expected_proba) + # ------------------------------------------------------------------------------------ # OLD DECODING TESTS From 7f281bf6e08977dc13f1e973dc0d36af74edd844 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 20:51:20 +0000 Subject: [PATCH 068/244] decode -> decode_bayes in preparation of decode_template --- doc/examples/tutorial_HD_dataset.md | 2 +- doc/user_guide/07_decoding.md | 8 ++++---- pynapple/process/__init__.py | 2 +- pynapple/process/decoding.py | 10 ++++++---- tests/test_decoding.py | 8 ++++---- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md 
b/doc/examples/tutorial_HD_dataset.md index ffb4f4481..3ee7bb205 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -183,7 +183,7 @@ To decode the population activity, we will be using a bayesian decoder as implem Again, just a single line of code! ```{code-cell} ipython3 -decoded, proba_feature = nap.decode( +decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves, group=spikes_adn, epochs=epochs[epochs.tags == "wake"], diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index c196f3652..e43d70635 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -84,10 +84,10 @@ tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.show() ``` -`nap.decode` performs bayesian decoding: +`nap.decode_bayes` performs bayesian decoding: ```{code-cell} ipython3 -decoded, proba_feature = nap.decode( +decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves_1d, # 1D tuning curves group=tsgroup, # Spiking activity epochs=epochs, # Small epoch @@ -162,10 +162,10 @@ tuning_curves_2d.plot(row="unit", col_wrap=6) plt.show() ``` -and `nap.decode` again performs bayesian decoding: +and `nap.decode_bayes` again performs bayesian decoding: ```{code-cell} ipython3 -decoded, proba_feature = nap.decode( +decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves_2d, # 2D tuning curves group=ts_group, # Spiking activity epochs=epochs, # Epoch diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index ca4b6f5aa..1fde96ca3 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -4,7 +4,7 @@ compute_eventcorrelogram, compute_isi_distribution, ) -from .decoding import decode, decode_1d, decode_2d +from .decoding import decode_1d, decode_2d, decode_bayes from .filtering import ( apply_bandpass_filter, apply_bandstop_filter, diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 
aa3955fc1..0abafbf3a 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -10,7 +10,9 @@ from .. import core as nap -def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy=False): +def decode_bayes( + tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy=False +): """ Performs Bayesian decoding over n-dimensional features. @@ -25,7 +27,7 @@ def decode(tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy ---------- tuning_curves : xr.DataArray Tuning curves as outputed by `compute_tuning_curves` (one for each unit). - group : TsGroup, TsdFrame or dict of Ts/Tsd object. + group : TsGroup, TsdFrame or dict of Ts, Tsd A group of neurons with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed rates (recommended). epochs : IntervalSet @@ -160,7 +162,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): occupancy, _ = np.histogram(feature.values, bins) else: raise RuntimeError("Unknown format for feature in decode_1d") - return decode( + return decode_bayes( xr.DataArray( data=tuning_curves.values, coords={ @@ -201,7 +203,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N features[:, 0].values, features[:, 1].values, [binsxy[0], binsxy[1]] ) occupancy = occupancy.flatten() - return decode( + return decode_bayes( xr.DataArray( data=[tuning_curves[i] for i in indexes], coords={"unit": indexes, "0": xy[0], "1": xy[1]}, diff --git a/tests/test_decoding.py b/tests/test_decoding.py index e9d16a1fc..2b9cd4607 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -171,22 +171,22 @@ def get_testing_set_n(n_features=1, binned=False): ), ], ) -def test_decode_type_errors(overwrite_default_args, expectation): +def test_decode_bayes_type_errors(overwrite_default_args, expectation): default_args = get_testing_set_n() default_args.update(overwrite_default_args) default_args.pop("features") with 
expectation: - nap.decode(**default_args) + nap.decode_bayes(**default_args) @pytest.mark.parametrize("use_occupancy", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def test_decode(n_features, binned, use_occupancy): +def test_decode_bayes(n_features, binned, use_occupancy): features, tuning_curves, group, epochs, bin_size = get_testing_set_n( n_features, binned=binned ).values() - decoded, proba = nap.decode( + decoded, proba = nap.decode_bayes( tuning_curves=tuning_curves, group=group, epochs=epochs, From 26601640c52d54f96f0afde41ebce6372462ce56 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 21:06:46 +0000 Subject: [PATCH 069/244] couple missed tests --- tests/test_decoding.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 2b9cd4607..8d3ca10f9 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -77,6 +77,13 @@ def get_testing_set_n(n_features=1, binned=False): match="Different shapes for tuning_curves and group.", ), ), + ( + {"tuning_curves": get_testing_set_n(2, binned=True)["tuning_curves"]}, + pytest.raises( + ValueError, + match="Different shapes for tuning_curves and group.", + ), + ), ( { "tuning_curves": get_testing_set_n()["tuning_curves"].assign_coords( @@ -88,6 +95,17 @@ def get_testing_set_n(n_features=1, binned=False): match="Different indices for tuning curves and group keys.", ), ), + ( + { + "tuning_curves": get_testing_set_n(binned=True)[ + "tuning_curves" + ].assign_coords(unit=[2, 3]) + }, + pytest.raises( + ValueError, + match="Different indices for tuning curves and group keys.", + ), + ), ({}, does_not_raise()), (get_testing_set_n(1), does_not_raise()), (get_testing_set_n(2), does_not_raise()), From e606a9e2318ccd11005997c08cd8b4b4d7e20069 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 21:34:48 +0000 Subject: [PATCH 070/244] docstring for 
decode_bayes --- pynapple/process/decoding.py | 144 +++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 0abafbf3a..d3130d6ca 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -47,6 +47,150 @@ def decode_bayes( TsdFrame, TsdTensor The probability distribution of the decoded trajectory for each time bin + + Examples + -------- + In the simplest case, we can decode a single feature (e.g., position) from a group of neurons: + + >>> import pynapple as nap + >>> import numpy as np + >>> group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) + >>> feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) + >>> tuning_curves = nap.compute_tuning_curves(group, feature, bins=2, range=(-.5, 1.5)) + >>> epochs = nap.IntervalSet([0, 100]) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) + ---------- -- + 0.5 0 + 1.5 0 + 2.5 0 + 3.5 0 + 4.5 0 + 5.5 0 + 6.5 0 + ... + 93.5 1 + 94.5 1 + 95.5 1 + 96.5 1 + 97.5 1 + 98.5 1 + 99.5 1 + dtype: float64, shape: (100,) + + `decode` is a `Tsd` object containing the decoded feature for each time bin. + + >>> p + Time (s) 0 1 + ---------- --- --- + 0.5 1.0 0.0 + 1.5 1.0 0.0 + 2.5 1.0 0.0 + 3.5 1.0 0.0 + 4.5 1.0 0.0 + 5.5 1.0 0.0 + 6.5 1.0 0.0 + ... ... ... + 93.5 0.0 1.0 + 94.5 0.0 1.0 + 95.5 0.0 1.0 + 96.5 0.0 1.0 + 97.5 0.0 1.0 + 98.5 0.0 1.0 + 99.5 0.0 1.0 + dtype: float64, shape: (100, 2) + + `p` is a `TsdFrame` object containing the probability distribution for each time bin. + + The function also works for multiple features, in which case it does n-dimensional decoding. + + >>> features = nap.TsdFrame( + ... t=np.arange(0, 100, 1), + ... d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, + ... ) + >>> group = nap.TsGroup( + ... { + ... 0: nap.Ts(np.arange(0, 50, 2)), + ... 
1: nap.Ts(np.arange(1, 51, 2)), + ... 2: nap.Ts(np.arange(50, 100, 2)), + ... 3: nap.Ts(np.arange(51, 101, 2)), + ... } + ... ) + >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) 0 1 + ---------- --- --- + 0.5 0.0 0.0 + 1.5 0.0 1.0 + 2.5 0.0 0.0 + 3.5 0.0 1.0 + 4.5 0.0 0.0 + 5.5 0.0 1.0 + 6.5 0.0 0.0 + ... ... ... + 93.5 1.0 1.0 + 94.5 1.0 0.0 + 95.5 1.0 1.0 + 96.5 1.0 0.0 + 97.5 1.0 1.0 + 98.5 1.0 0.0 + 99.5 1.0 1.0 + dtype: float64, shape: (100, 2) + + `decode` is now a `TsdFrame` object containing the decoded features for each time bin. + + >>> p + Time (s) + ---------- -------------- + 0.5 [[1., 0.] ...] + 1.5 [[0., 1.] ...] + 2.5 [[1., 0.] ...] + 3.5 [[0., 1.] ...] + 4.5 [[1., 0.] ...] + 5.5 [[0., 1.] ...] + 6.5 [[1., 0.] ...] + ... + 93.5 [[0., 0.] ...] + 94.5 [[0., 0.] ...] + 95.5 [[0., 0.] ...] + 96.5 [[0., 0.] ...] + 97.5 [[0., 0.] ...] + 98.5 [[0., 0.] ...] + 99.5 [[0., 0.] ...] + dtype: float64, shape: (100, 2, 2) + + and `p` is a `TsdTensor` object containing the probability distribution for each time bin. + + It is also possible to pass continuous values instead of spikes (e.g. smoothed spike counts): + + >>> features = nap.TsdFrame( + ... t=np.arange(0, 100, 1), + ... d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, + ... ) + >>> group = group.count(1).smooth(2) + >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) 0 1 + ---------- --- --- + 0.5 0.0 1.0 + 1.5 0.0 1.0 + 2.5 0.0 1.0 + 3.5 0.0 1.0 + 4.5 0.0 0.0 + 5.5 0.0 0.0 + 6.5 0.0 0.0 + ... ... ... 
+ 92.5 1.0 0.0 + 93.5 1.0 0.0 + 94.5 1.0 0.0 + 95.5 1.0 1.0 + 96.5 1.0 1.0 + 97.5 1.0 1.0 + 98.5 1.0 1.0 + dtype: float64, shape: (98, 2) """ # check tuning curves From 1d50a56c156633d26494bc4068f33839b6d38d8e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 18 Jul 2025 21:50:15 +0000 Subject: [PATCH 071/244] better docstrings --- pynapple/process/decoding.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index d3130d6ca..8f6f06f7f 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -79,7 +79,7 @@ def decode_bayes( 99.5 1 dtype: float64, shape: (100,) - `decode` is a `Tsd` object containing the decoded feature for each time bin. + decode is a `Tsd` object containing the decoded feature for each time bin. >>> p Time (s) 0 1 @@ -101,9 +101,9 @@ def decode_bayes( 99.5 0.0 1.0 dtype: float64, shape: (100, 2) - `p` is a `TsdFrame` object containing the probability distribution for each time bin. + p is a `TsdFrame` object containing the probability distribution for each time bin. - The function also works for multiple features, in which case it does n-dimensional decoding. + The function also works for multiple features, in which case it does n-dimensional decoding: >>> features = nap.TsdFrame( ... t=np.arange(0, 100, 1), @@ -139,7 +139,7 @@ def decode_bayes( 99.5 1.0 1.0 dtype: float64, shape: (100, 2) - `decode` is now a `TsdFrame` object containing the decoded features for each time bin. + decoded is now a `TsdFrame` object containing the decoded features for each time bin. >>> p Time (s) @@ -161,14 +161,10 @@ def decode_bayes( 99.5 [[0., 0.] ...] dtype: float64, shape: (100, 2, 2) - and `p` is a `TsdTensor` object containing the probability distribution for each time bin. + and p is a `TsdTensor` object containing the probability distribution for each time bin. It is also possible to pass continuous values instead of spikes (e.g. 
smoothed spike counts): - >>> features = nap.TsdFrame( - ... t=np.arange(0, 100, 1), - ... d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, - ... ) >>> group = group.count(1).smooth(2) >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) From 89e1dba048a9d28ea133c3a48d44a359a27b6ad5 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 19 Jul 2025 23:28:03 +0000 Subject: [PATCH 072/244] addressing review --- pynapple/process/tuning_curves.py | 45 +- tests/test_tuning_curves.py | 1573 +++++++++++++++-------------- 2 files changed, 818 insertions(+), 800 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 815051743..e2eb01d20 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -264,8 +264,10 @@ def compute_tuning_curves( if feature_names is None: feature_names = features.columns else: - if not isinstance(feature_names, list) or not all( - isinstance(n, str) for n in feature_names + if ( + not hasattr(feature_names, "__len__") + or isinstance(feature_names, str) + or not all(isinstance(n, str) for n in feature_names) ): raise TypeError("feature_names should be a list of strings.") if len(feature_names) != features.shape[1]: @@ -329,30 +331,27 @@ def compute_tuning_curves( tcs[np.isnan(tcs)] = 0.0 tcs[:, occupancy == 0.0] = np.nan - if return_pandas and features.shape[1] == 1: - return pd.DataFrame( - tcs.T, - index=bin_edges[0][:-1] + np.diff(bin_edges[0]) / 2, - columns=keys, - ) - else: - return xr.DataArray( - tcs, - coords={ - "unit": keys, - **{ - str(feature_name): e[:-1] + np.diff(e) / 2 - for feature_name, e in zip(feature_names, bin_edges) - }, + tcs = xr.DataArray( + tcs, + coords={ + "unit": keys, + **{ + str(feature_name): e[:-1] + np.diff(e) / 2 + for feature_name, e in zip(feature_names, bin_edges) }, - 
attrs={"occupancy": occupancy, "bin_edges": bin_edges}, - ) + }, + attrs={"occupancy": occupancy, "bin_edges": bin_edges}, + ) + if return_pandas: + return tcs.to_pandas().T + else: + return tcs @_validate_tuning_inputs def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): warnings.warn( - "compute_1d_tuning_curves is deprecated and will be removed in v1.0; " + "compute_1d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", DeprecationWarning, stacklevel=2, @@ -375,7 +374,7 @@ def compute_1d_tuning_curves_continuous( tsdframe, feature, nb_bins, ep=None, minmax=None ): warnings.warn( - "compute_1d_tuning_curves_continuous is deprecated and will be removed in v1.0; " + "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", DeprecationWarning, stacklevel=2, @@ -396,7 +395,7 @@ def compute_1d_tuning_curves_continuous( @_validate_tuning_inputs def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): warnings.warn( - "compute_2d_tuning_curves is deprecated and will be removed in v1.0; " + "compute_2d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", DeprecationWarning, stacklevel=2, @@ -420,7 +419,7 @@ def compute_2d_tuning_curves_continuous( tsdframe, features, nb_bins, ep=None, minmax=None ): warnings.warn( - "compute_2d_tuning_curves_continuous is deprecated and will be removed in v1.0; " + "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", DeprecationWarning, stacklevel=2, diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 8ef611ac5..09a66c0ce 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -10,40 +10,6 @@ import pynapple as nap -######################## -# Type Error -######################## -def 
get_group(): - return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) - - -def get_feature(): - return nap.Tsd( - t=np.arange(0, 100, 0.1), - d=np.arange(0, 100, 0.1) % 1.0, - time_support=nap.IntervalSet(0, 100), - ) - - -def get_features(): - tmp = np.vstack( - (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) - ).T - return nap.TsdFrame( - t=np.arange(0, 200, 0.1), - d=np.vstack((tmp, tmp[::-1])), - time_support=nap.IntervalSet(0, 200), - ) - - -def get_ep(): - return nap.IntervalSet(start=0, end=50) - - -def get_tsdframe(): - return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) - - def get_group_n(n): return nap.TsGroup( {i + 1: nap.Ts(t=np.arange(0, 100, 10 ** (i - 1))) for i in range(n)} @@ -60,142 +26,6 @@ def get_features_n(n, fs=10.0): ) -@pytest.mark.parametrize( - "group, dict_ep, expected_exception", - [ - ( - "a", - { - 0: nap.IntervalSet(start=0, end=50), - 1: nap.IntervalSet(start=50, end=100), - }, - pytest.raises(TypeError, match="group should be a TsGroup."), - ), - ( - get_group(), - "a", - pytest.raises( - TypeError, match="dict_ep should be a dictionary of IntervalSet" - ), - ), - ( - get_group(), - {0: "a", 1: nap.IntervalSet(start=50, end=100)}, - pytest.raises( - TypeError, match="dict_ep argument should contain only IntervalSet." 
- ), - ), - ], -) -def test_compute_discrete_tuning_curves_errors(group, dict_ep, expected_exception): - with expected_exception: - nap.compute_discrete_tuning_curves(group, dict_ep) - - -@pytest.mark.parametrize( - "tc, feature, ep, minmax, bitssec, expected_exception", - [ - ( - "a", - get_feature(), - get_ep(), - (0, 1), - True, - "Argument tc should be of type pandas.DataFrame or numpy.ndarray", - ), - ( - pd.DataFrame(), - "a", - get_ep(), - (0, 1), - True, - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", - ), - ( - pd.DataFrame(), - get_feature(), - "a", - (0, 1), - True, - r"ep should be an IntervalSet", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - 1, - True, - r"minmax should be a tuple\/list of 2 numbers", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_1d_mutual_info_errors( - tc, feature, ep, minmax, bitssec, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): - nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) - - -@pytest.mark.parametrize( - "dict_tc, features, ep, minmax, bitssec, expected_exception", - [ - ( - "a", - get_features(), - get_ep(), - (0, 1), - True, - "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", - ), - ( - {0: np.zeros((2, 2))}, - "a", - get_ep(), - (0, 1), - True, - r"features should be a TsdFrame with 2 columns", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - "a", - (0, 1), - True, - r"ep should be an IntervalSet", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - 1, - True, - r"minmax should be a tuple\/list of 2 numbers", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_2d_mutual_info_errors( - dict_tc, features, ep, minmax, bitssec, expected_exception -): - with pytest.raises(TypeError, 
match=expected_exception): - nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) - - @pytest.mark.parametrize( "group, features, kwargs, expectation", [ @@ -359,21 +189,16 @@ def test_compute_2d_mutual_info_errors( ( get_group_n(1), get_features_n(1), - {"feature_names": ["feature0"]}, - does_not_raise(), - ), - ( - get_group_n(1), - get_features_n(1), - {"feature_names": ["feature0", "feature1"]}, + {"feature_names": [1]}, pytest.raises( - ValueError, match="feature_names should match the number of features." + TypeError, + match="feature_names should be a list of strings.", ), ), ( get_group_n(1), get_features_n(1), - {"feature_names": [1]}, + {"feature_names": [(1,)]}, pytest.raises( TypeError, match="feature_names should be a list of strings.", @@ -382,7 +207,7 @@ def test_compute_2d_mutual_info_errors( ( get_group_n(1), get_features_n(1), - {"feature_names": [(1,)]}, + {"feature_names": [(1, 1)]}, pytest.raises( TypeError, match="feature_names should be a list of strings.", @@ -391,12 +216,47 @@ def test_compute_2d_mutual_info_errors( ( get_group_n(1), get_features_n(1), - {"feature_names": [(1, 1)]}, + {"feature_names": ["feature0", "feature1"]}, pytest.raises( - TypeError, - match="feature_names should be a list of strings.", + ValueError, match="feature_names should match the number of features." 
), ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": ["feature0"]}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(2), + {"feature_names": ["feature0", "feature1"]}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": np.array(["feature0"])}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(2), + {"feature_names": np.array(["feature0", "feature1"])}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"feature_names": ("feature0",)}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(2), + {"feature_names": ("feature0", "feature1")}, + does_not_raise(), + ), # return pandas ( get_group_n(1), @@ -422,6 +282,15 @@ def test_compute_2d_mutual_info_errors( {"return_pandas": True}, does_not_raise(), ), + ( + get_group_n(1), + get_features_n(2), + {"return_pandas": True}, + pytest.raises( + ValueError, + match="Cannot convert arrays with 3 dimensions into pandas objects. Requires 2 or fewer dimensions.", + ), + ), ], ) def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): @@ -429,118 +298,520 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) nap.compute_tuning_curves(group, features, **kwargs) -######################## -# Normal test -######################## -@pytest.mark.parametrize("group", [get_group()]) -@pytest.mark.parametrize( - "dict_ep", - [ - {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, - { - "0": nap.IntervalSet(start=0, end=50), - "1": nap.IntervalSet(start=50, end=100), - }, - ], -) -def test_compute_discrete_tuning_curves(group, dict_ep): - tc = nap.compute_discrete_tuning_curves(group, dict_ep) - assert len(tc) == 2 - assert list(tc.columns) == list(group.keys()) - assert list(tc.index.values) == list(dict_ep.keys()) - np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) - np.testing.assert_almost_equal(tc.iloc[1, 0], 1) - - 
-@pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "group, features, kwargs, expected", [ - ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), + # single rate unit, single feature ( - (get_group(), get_feature(), 10), - {"ep": get_ep()}, - np.array([10.0] + [0.0] * 9)[:, None], - ), + get_group_n(1).count(1.0), + get_features_n(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple rate units, single feature + ( + get_group_n(2).count(1.0), + get_features_n(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, + ), + ), + # multiple rate units, multiple features + ( + get_group_n(2).count(1.0), + get_features_n(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature + ( + get_group_n(1), + get_features_n(1), + {}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple units, single feature + ( + get_group_n(2), + get_features_n(1), + {}, + xr.DataArray( + np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), + dims=["unit", "feature0"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + }, + ), + ), + # multiple units, multiple features + ( + get_group_n(2), + get_features_n(2), + {}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 10.0, np.nan), + np.where(np.eye(10), 1.0, 
np.nan), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature, specified number of bins + ( + get_group_n(1), + get_features_n(1), + {"bins": 5}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, + ), + ), + # single unit, multiple features, specified number of bins + ( + get_group_n(1), + get_features_n(2), + {"bins": 5}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, + }, + ), + ), + # single unit, multiple features, specified number of bins per feature + ( + get_group_n(1), + get_features_n(2), + {"bins": (5, 4)}, + xr.DataArray( + np.array( + [ + [ + [10.0, np.nan, np.nan, np.nan], + [10.0, 10.0, np.nan, np.nan], + [np.nan, 10.0, 10.0, np.nan], + [np.nan, np.nan, 10.0, 10.0], + [np.nan, np.nan, np.nan, 10.0], + ] + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, + "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, + }, + ), + ), + # single unit, single feature, specified bins + ( + get_group_n(1), + get_features_n(1), + {"bins": [np.linspace(0, 10, 6)]}, + xr.DataArray( + np.full((1, 5), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, + ), + ), + # single unit, multiple features, specified bins + ( + get_group_n(1), + get_features_n(2), + {"bins": [np.linspace(0, 10, 6), np.linspace(0, 20, 6)]}, + xr.DataArray( + np.where(np.eye(5), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.arange(1, 11, 2), + "feature1": np.arange(2, 22, 4), + }, + ), 
+ ), + # single unit, single feature, specified range + ( + get_group_n(1), + get_features_n(1), + {"range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature + ( + get_group_n(1), + get_features_n(2), + {"range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified range and number of bins + ( + get_group_n(1), + get_features_n(1), + {"bins": 10, "range": [(0, 5)]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + ), + ), + # single unit, multiple features, specified range per feature and number of bins + ( + get_group_n(1), + get_features_n(2), + {"bins": 10, "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, multiple features, specified range and number of bins per feature + ( + get_group_n(1), + get_features_n(2), + {"bins": (10, 10), "range": [(0, 5), (0, 10)]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1], + "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, + "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, + }, + ), + ), + # single unit, single feature, specified epochs (smaller) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 50.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + 
dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (larger) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 200.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified epochs (multiple) + ( + get_group_n(1), + get_features_n(1), + {"epochs": nap.IntervalSet([0.0, 50.0], [20.0, 70.0])}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, specified feature name ( - (get_group(), get_feature(), 10), - {"minmax": (0, 0.9)}, - np.array([10.0] + [0.0] * 9)[:, None], + get_group_n(1), + get_features_n(1), + {"feature_names": ["f0"]}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "f0"], + coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), ), + # single unit, multiple features, specified feature names ( - (get_group(), get_feature(), 20), - {"minmax": (0, 1.9)}, - np.array([10.0] + [0.0] * 9 + [np.nan] * 10)[:, None], + get_group_n(1), + get_features_n(2), + {"feature_names": ["f0", "f1"]}, + xr.DataArray( + np.where(np.eye(10), 10.0, np.nan)[None, :], + dims=["unit", "f0", "f1"], + coords={ + "unit": [1], + "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), + # single unit, single feature, return_pandas=True + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": True}, + xr.DataArray( + np.full((1, 10), 10.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ) + .to_pandas() + .T, ), ], ) -def test_compute_1d_tuning_curves(args, kwargs, expected): - tc = nap.compute_1d_tuning_curves(*args, **kwargs) - # 
Columns - assert list(tc.columns) == list(args[0].keys()) - - # Index - assert len(tc) == args[2] - if "minmax" in kwargs: - tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], args[2] + 1) +def test_compute_tuning_curves(group, features, kwargs, expected): + tcs = nap.compute_tuning_curves(group, features, **kwargs) + if isinstance(expected, pd.DataFrame): + pd.testing.assert_frame_equal(tcs, expected) else: - tmp = np.linspace(np.min(args[1]), np.max(args[1]), args[2] + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) + xr.testing.assert_allclose(tcs, expected) - # Array - np.testing.assert_almost_equal(tc.values, expected) + +# ------------------------------------------------------------------------------------ +# DISCRETE TUNING CURVE TESTS +# ------------------------------------------------------------------------------------ + + +def get_group(): + return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) + + +def get_feature(): + return nap.Tsd( + t=np.arange(0, 100, 0.1), + d=np.arange(0, 100, 0.1) % 1.0, + time_support=nap.IntervalSet(0, 100), + ) + + +def get_features(): + tmp = np.vstack( + (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) + ).T + return nap.TsdFrame( + t=np.arange(0, 200, 0.1), + d=np.vstack((tmp, tmp[::-1])), + time_support=nap.IntervalSet(0, 200), + ) + + +def get_ep(): + return nap.IntervalSet(start=0, end=50) + + +def get_tsdframe(): + return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) -@pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "group, dict_ep, expected_exception", [ - ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), - ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), ( - (get_group(), get_features(), 10), - {"ep": nap.IntervalSet(0, 400)}, - np.ones((10, 10)) * 0.5, + "a", + { + 0: nap.IntervalSet(start=0, end=50), + 1: nap.IntervalSet(start=50, end=100), + }, + 
pytest.raises(TypeError, match="group should be a TsGroup."), ), ( - (get_group(), get_features(), 10), - {"minmax": (0, 100, 0, 100)}, - np.ones((10, 10)) * 0.5, + get_group(), + "a", + pytest.raises( + TypeError, match="dict_ep should be a dictionary of IntervalSet" + ), ), ( - (get_group(), get_features(), 10), - {"minmax": (0, 200, 0, 100)}, - np.vstack((np.ones((5, 10)) * 0.5, np.ones((5, 10)) * np.nan)), + get_group(), + {0: "a", 1: nap.IntervalSet(start=50, end=100)}, + pytest.raises( + TypeError, match="dict_ep argument should contain only IntervalSet." + ), ), ], ) -def test_compute_2d_tuning_curves(args, kwargs, expected): - tc, xy = nap.compute_2d_tuning_curves(*args, **kwargs) - assert isinstance(tc, dict) +def test_compute_discrete_tuning_curves_errors(group, dict_ep, expected_exception): + with expected_exception: + nap.compute_discrete_tuning_curves(group, dict_ep) - # Keys - assert list(tc.keys()) == list(args[0].keys()) - # Index - assert isinstance(xy, list) - assert len(xy) == 2 - nb_bins = args[2] - if isinstance(args[2], int): - nb_bins = (args[2], args[2]) - if "minmax" in kwargs: - tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) - tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) - else: - tmp1 = np.linspace(np.min(args[1][:, 0]), np.max(args[1][:, 0]), nb_bins[0] + 1) - tmp2 = np.linspace(np.min(args[1][:, 1]), np.max(args[1][:, 1]), nb_bins[1] + 1) +@pytest.mark.parametrize("group", [get_group()]) +@pytest.mark.parametrize( + "dict_ep", + [ + {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, + { + "0": nap.IntervalSet(start=0, end=50), + "1": nap.IntervalSet(start=50, end=100), + }, + ], +) +def test_compute_discrete_tuning_curves(group, dict_ep): + tc = nap.compute_discrete_tuning_curves(group, dict_ep) + assert len(tc) == 2 + assert list(tc.columns) == list(group.keys()) + assert list(tc.index.values) == list(dict_ep.keys()) + 
np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) + np.testing.assert_almost_equal(tc.iloc[1, 0], 1) - np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) - # Values - for i in tc.keys(): - assert tc[i].shape == nb_bins - np.testing.assert_almost_equal(tc[i], expected) +# ------------------------------------------------------------------------------------ +# MUTUAL INFORMATION TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.parametrize( + "tc, feature, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_feature(), + get_ep(), + (0, 1), + True, + "Argument tc should be of type pandas.DataFrame or numpy.ndarray", + ), + ( + pd.DataFrame(), + "a", + get_ep(), + (0, 1), + True, + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + pd.DataFrame(), + get_feature(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_1d_mutual_info_errors( + tc, feature, ep, minmax, bitssec, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) + + +@pytest.mark.parametrize( + "dict_tc, features, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_features(), + get_ep(), + (0, 1), + True, + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", + ), + ( + {0: np.zeros((2, 2))}, + "a", + get_ep(), + (0, 1), + True, + r"features should be a TsdFrame with 2 columns", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + {0: np.zeros((2, 2))}, + 
get_features(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_2d_mutual_info_errors( + dict_tc, features, ep, minmax, bitssec, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) @pytest.mark.filterwarnings("ignore") @@ -605,573 +876,321 @@ def test_compute_1d_mutual_info(args, kwargs, expected): "args, kwargs, expected", [ ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - np.array([[[0, 1], [0, 0]]]), - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"bitssec": True}, - np.array([[0.5]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"ep": nap.IntervalSet(start=0, end=7)}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"minmax": (0, 1, 0, 1)}, - np.array([[2.0]]), - ), - ], -) -def test_compute_2d_mutual_info(args, kwargs, expected): - dict_tc = args[0] - features = args[1] - si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) - assert isinstance(si, pd.DataFrame) - assert list(si.columns) == ["SI"] - if isinstance(dict_tc, dict): - assert list(si.index.values) == list(dict_tc.keys()) - np.testing.assert_approx_equal(si.values, expected) - - 
-@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expected", - [ - ( - (get_tsdframe(), get_feature(), 10), - {}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature()[:, np.newaxis], 10), - {}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe()[:, 0], get_feature(), 10), - {}, - np.vstack((np.ones((1, 1)), np.zeros((9, 1)))), - ), - ( - (get_tsdframe(), get_feature(), 10), - {"ep": get_ep()}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature(), 10), - {"minmax": (0, 0.9)}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature(), 20), - {"minmax": (0, 1.9)}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)), np.ones((10, 2)) * np.nan)), - ), - ], -) -def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): - tsdframe, feature, nb_bins = args - tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) - # Columns - if hasattr(tsdframe, "columns"): - assert list(tc.columns) == list(tsdframe.columns) - # Index - assert len(tc) == nb_bins - if "minmax" in kwargs: - tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) - else: - tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) - # Array - np.testing.assert_almost_equal(tc.values, expected) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "tsdframe, nb_bins, kwargs, expected", - [ - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - columns=["x", "y"], - ), - 2, - {}, - {"x": np.ones((2, 2)), "y": np.ones((2, 2)) * 2}, - ), - ( - nap.Tsd(t=np.arange(0, 100), 
d=np.hstack((np.ones((100,)) * 2))), - 2, - {}, - {0: np.ones((2, 2)) * 2}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - (1, 2), - {}, - {0: np.array([[1.0, 1.0]]), 1: np.array([[2.0, 2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {"ep": get_ep()}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {"minmax": (0, 1, 0, 1)}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - (1, 3), - {"minmax": (0, 1, 0, 3)}, - {0: np.array([[1.0, 1.0, np.nan]]), 1: np.array([[2.0, 2.0, np.nan]])}, - ), - ], -) -def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected): - features = nap.TsdFrame( - t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T - ) - tc, xy = nap.compute_2d_tuning_curves_continuous( - tsdframe, features, nb_bins, **kwargs - ) - - # Keys - if hasattr(tsdframe, "columns"): - assert list(tc.keys()) == list(tsdframe.columns) - - # Index - assert isinstance(xy, list) - assert len(xy) == 2 - if isinstance(nb_bins, int): - nb_bins = (nb_bins, nb_bins) - if "minmax" in kwargs: - tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) - tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) - else: - tmp1 = np.linspace( - np.min(features[:, 0]), np.max(features[:, 0]), nb_bins[0] + 1 - ) - tmp2 = np.linspace( - np.min(features[:, 1]), np.max(features[:, 1]), nb_bins[1] + 1 - ) - - np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) - - # Values - for i in tc.keys(): - assert tc[i].shape == nb_bins - 
np.testing.assert_almost_equal(tc[i], expected[i]) - - -@pytest.mark.parametrize( - "group, features, kwargs, expected", - [ - # single rate unit, single feature - ( - get_group_n(1).count(1.0), - get_features_n(1), - {}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), - # multiple rate units, single feature - ( - get_group_n(2).count(1.0), - get_features_n(1), - {}, - xr.DataArray( - np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), - dims=["unit", "feature0"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - }, + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), ), + {}, + np.array([[2.0]]), ), - # multiple rate units, multiple features ( - get_group_n(2).count(1.0), - get_features_n(2), - {}, - xr.DataArray( - np.stack( - [ - np.where(np.eye(10), 10.0, np.nan), - np.where(np.eye(10), 1.0, np.nan), - ] + ( + np.array([[[0, 1], [0, 0]]]), + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, ), + {}, + np.array([[2.0]]), ), - # single unit, single feature ( - get_group_n(1), - get_features_n(1), - {}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), ), + {"bitssec": True}, + np.array([[0.5]]), ), - # multiple units, single feature ( - get_group_n(2), - get_features_n(1), - {}, - xr.DataArray( - np.concatenate([np.full((1, 10), 10.0), np.full((1, 10), 1.0)]), - dims=["unit", 
"feature0"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - }, + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), ), + {"ep": nap.IntervalSet(start=0, end=7)}, + np.array([[2.0]]), ), - # multiple units, multiple features ( - get_group_n(2), - get_features_n(2), - {}, - xr.DataArray( - np.stack( - [ - np.where(np.eye(10), 10.0, np.nan), - np.where(np.eye(10), 1.0, np.nan), - ] + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, ), + {"minmax": (0, 1, 0, 1)}, + np.array([[2.0]]), ), - # single unit, single feature, specified number of bins + ], +) +def test_compute_2d_mutual_info(args, kwargs, expected): + dict_tc = args[0] + features = args[1] + si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) + assert isinstance(si, pd.DataFrame) + assert list(si.columns) == ["SI"] + if isinstance(dict_tc, dict): + assert list(si.index.values) == list(dict_tc.keys()) + np.testing.assert_approx_equal(si.values, expected) + + +# ------------------------------------------------------------------------------------ +# OLD TUNING CURVE TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), ( - get_group_n(1), - get_features_n(1), - {"bins": 5}, - xr.DataArray( - np.full((1, 5), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, - ), + (get_group(), get_feature(), 10), + {"ep": get_ep()}, + np.array([10.0] + [0.0] * 9)[:, 
None], ), - # single unit, multiple features, specified number of bins ( - get_group_n(1), - get_features_n(2), - {"bins": 5}, - xr.DataArray( - np.where(np.eye(5), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, - "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, - }, - ), + (get_group(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.array([10.0] + [0.0] * 9)[:, None], ), - # single unit, multiple features, specified number of bins per feature ( - get_group_n(1), - get_features_n(2), - {"bins": (5, 4)}, - xr.DataArray( - np.array( - [ - [ - [10.0, np.nan, np.nan, np.nan], - [10.0, 10.0, np.nan, np.nan], - [np.nan, 10.0, 10.0, np.nan], - [np.nan, np.nan, 10.0, 10.0], - [np.nan, np.nan, np.nan, 10.0], - ] - ] - ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, - "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, - }, - ), + (get_group(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.array([10.0] + [0.0] * 9 + [np.nan] * 10)[:, None], ), - # single unit, single feature, specified bins + ], +) +def test_compute_1d_tuning_curves(args, kwargs, expected): + tc = nap.compute_1d_tuning_curves(*args, **kwargs) + # Columns + assert list(tc.columns) == list(args[0].keys()) + + # Index + assert len(tc) == args[2] + if "minmax" in kwargs: + tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], args[2] + 1) + else: + tmp = np.linspace(np.min(args[1]), np.max(args[1]), args[2] + 1) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) + + # Array + np.testing.assert_almost_equal(tc.values, expected) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), + ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), ( - get_group_n(1), - get_features_n(1), - 
{"bins": [np.linspace(0, 10, 6)]}, - xr.DataArray( - np.full((1, 5), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, - ), + (get_group(), get_features(), 10), + {"ep": nap.IntervalSet(0, 400)}, + np.ones((10, 10)) * 0.5, ), - # single unit, multiple features, specified bins ( - get_group_n(1), - get_features_n(2), - {"bins": [np.linspace(0, 10, 6), np.linspace(0, 20, 6)]}, - xr.DataArray( - np.where(np.eye(5), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.arange(1, 11, 2), - "feature1": np.arange(2, 22, 4), - }, - ), + (get_group(), get_features(), 10), + {"minmax": (0, 100, 0, 100)}, + np.ones((10, 10)) * 0.5, ), - # single unit, single feature, specified range ( - get_group_n(1), - get_features_n(1), - {"range": [(0, 5)]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, - ), + (get_group(), get_features(), 10), + {"minmax": (0, 200, 0, 100)}, + np.vstack((np.ones((5, 10)) * 0.5, np.ones((5, 10)) * np.nan)), + ), + ], +) +def test_compute_2d_tuning_curves(args, kwargs, expected): + tc, xy = nap.compute_2d_tuning_curves(*args, **kwargs) + assert isinstance(tc, dict) + + # Keys + assert list(tc.keys()) == list(args[0].keys()) + + # Index + assert isinstance(xy, list) + assert len(xy) == 2 + nb_bins = args[2] + if isinstance(args[2], int): + nb_bins = (args[2], args[2]) + if "minmax" in kwargs: + tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) + tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) + else: + tmp1 = np.linspace(np.min(args[1][:, 0]), np.max(args[1][:, 0]), nb_bins[0] + 1) + tmp2 = np.linspace(np.min(args[1][:, 1]), np.max(args[1][:, 1]), nb_bins[1] + 1) + + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) + + 
# Values + for i in tc.keys(): + assert tc[i].shape == nb_bins + np.testing.assert_almost_equal(tc[i], expected) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + (get_tsdframe(), get_feature(), 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), ), - # single unit, multiple features, specified range per feature ( - get_group_n(1), - get_features_n(2), - {"range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), + (get_tsdframe(), get_feature()[:, np.newaxis], 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), ), - # single unit, single feature, specified range and number of bins ( - get_group_n(1), - get_features_n(1), - {"bins": 10, "range": [(0, 5)]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, - ), + (get_tsdframe()[:, 0], get_feature(), 10), + {}, + np.vstack((np.ones((1, 1)), np.zeros((9, 1)))), ), - # single unit, multiple features, specified range per feature and number of bins ( - get_group_n(1), - get_features_n(2), - {"bins": 10, "range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), + (get_tsdframe(), get_feature(), 10), + {"ep": get_ep()}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), ), - # single unit, multiple features, specified range and number of bins per feature ( - get_group_n(1), - get_features_n(2), - {"bins": (10, 10), "range": [(0, 5), (0, 10)]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "feature0", 
"feature1"], - coords={ - "unit": [1], - "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, - "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, - }, - ), + (get_tsdframe(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), ), - # single unit, single feature, specified epochs (smaller) ( - get_group_n(1), - get_features_n(1), - {"epochs": nap.IntervalSet([0.0, 50.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), + (get_tsdframe(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)), np.ones((10, 2)) * np.nan)), ), - # single unit, single feature, specified epochs (larger) + ], +) +def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): + tsdframe, feature, nb_bins = args + tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) + # Columns + if hasattr(tsdframe, "columns"): + assert list(tc.columns) == list(tsdframe.columns) + # Index + assert len(tc) == nb_bins + if "minmax" in kwargs: + tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) + else: + tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) + # Array + np.testing.assert_almost_equal(tc.values, expected) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "tsdframe, nb_bins, kwargs, expected", + [ ( - get_group_n(1), - get_features_n(1), - {"epochs": nap.IntervalSet([0.0, 200.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), ), + 1, + {}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, ), - # single unit, single feature, specified epochs (multiple) ( - 
get_group_n(1), - get_features_n(1), - {"epochs": nap.IntervalSet([0.0, 50.0], [20.0, 70.0])}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + columns=["x", "y"], ), + 2, + {}, + {"x": np.ones((2, 2)), "y": np.ones((2, 2)) * 2}, ), - # single unit, single feature, specified feature name ( - get_group_n(1), - get_features_n(1), - {"feature_names": ["f0"]}, - xr.DataArray( - np.full((1, 10), 10.0), - dims=["unit", "f0"], - coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), + nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100,)) * 2))), + 2, + {}, + {0: np.ones((2, 2)) * 2}, ), - # single unit, multiple features, specified feature names ( - get_group_n(1), - get_features_n(2), - {"feature_names": ["f0", "f1"]}, - xr.DataArray( - np.where(np.eye(10), 10.0, np.nan)[None, :], - dims=["unit", "f0", "f1"], - coords={ - "unit": [1], - "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), ), + (1, 2), + {}, + {0: np.array([[1.0, 1.0]]), 1: np.array([[2.0, 2.0]])}, ), - # single unit, single feature, return_pandas=True ( - get_group_n(1), - get_features_n(1), - {"return_pandas": True}, - pd.DataFrame( - np.full((10, 1), 10.0), - index=np.linspace(0, 9.9, 11)[:-1] + 0.495, - columns=[1], + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), ), + 1, + {"ep": get_ep()}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, ), - # single unit, multiple feature, return_pandas=True ( - get_group_n(1), - get_features_n(1), - {"return_pandas": True}, - pd.DataFrame( - np.full((10, 1), 10.0), - index=np.linspace(0, 9.9, 11)[:-1] + 0.495, - columns=[1], + nap.TsdFrame( + t=np.arange(0, 100), + 
d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), ), + 1, + {"minmax": (0, 1, 0, 1)}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, ), - # multiple units, multiple features, return_pandas=True ( - get_group_n(2), - get_features_n(2), - {"return_pandas": True}, - xr.DataArray( - np.stack( - [ - np.where(np.eye(10), 10.0, np.nan), - np.where(np.eye(10), 1.0, np.nan), - ] - ), - dims=["unit", "feature0", "feature1"], - coords={ - "unit": [1, 2], - "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, - "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, - }, + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), ), + (1, 3), + {"minmax": (0, 1, 0, 3)}, + {0: np.array([[1.0, 1.0, np.nan]]), 1: np.array([[2.0, 2.0, np.nan]])}, ), ], ) -def test_compute_tuning_curves(group, features, kwargs, expected): - tcs = nap.compute_tuning_curves(group, features, **kwargs) - if isinstance(tcs, pd.DataFrame): - pd.testing.assert_frame_equal(tcs, expected) +def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected): + features = nap.TsdFrame( + t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T + ) + tc, xy = nap.compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, **kwargs + ) + + # Keys + if hasattr(tsdframe, "columns"): + assert list(tc.keys()) == list(tsdframe.columns) + + # Index + assert isinstance(xy, list) + assert len(xy) == 2 + if isinstance(nb_bins, int): + nb_bins = (nb_bins, nb_bins) + if "minmax" in kwargs: + tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) + tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) else: - xr.testing.assert_allclose(tcs, expected) + tmp1 = np.linspace( + np.min(features[:, 0]), np.max(features[:, 0]), nb_bins[0] + 1 + ) + tmp2 = np.linspace( + np.min(features[:, 1]), np.max(features[:, 1]), nb_bins[1] + 1 + ) + + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + 
np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) + + # Values + for i in tc.keys(): + assert tc[i].shape == nb_bins + np.testing.assert_almost_equal(tc[i], expected[i]) From 7f4fb09bb67497b0733fdbf8010ecee942aa15df Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 21 Jul 2025 16:31:50 +0000 Subject: [PATCH 073/244] couple fixes after review --- doc/examples/tutorial_calcium_imaging.md | 2 ++ main.py | 2 +- pynapple/process/tuning_curves.py | 3 +-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index e274d0d6f..be6c05bb1 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -157,4 +157,6 @@ Authors ^^^ Sofia Skromne Carrasco +Wolf De Wulf + ::: diff --git a/main.py b/main.py index 9528a5f0a..a5a2abf7a 100644 --- a/main.py +++ b/main.py @@ -22,7 +22,7 @@ 120, epochs=wake_ep, range=(0, 2 * np.pi), - feature_names=[("head direction", "rad")], + feature_names=["head direction"], ) # PLOT diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index e2eb01d20..3a6738b21 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -1,6 +1,5 @@ """ -Functions to compute tuning curves for features in 1 dimension or 2 dimension. - +Functions to compute n-dimensional tuning curves. 
""" import inspect From 64d7338c4ce73e063313ee6e25ed5201dc328ec6 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 22 Jul 2025 20:04:57 +0000 Subject: [PATCH 074/244] addressing review --- doc/user_guide/07_decoding.md | 61 ++++++++++++++++++----------------- pynapple/process/decoding.py | 48 +++++++++++++++------------ tests/test_decoding.py | 26 ++++----------- 3 files changed, 66 insertions(+), 69 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index e43d70635..5afbdfea6 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -24,17 +24,23 @@ custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) ``` - -Pynapple supports n-dimensional bayesian decoding. -The function returns the decoded feature as well as the probabilities for each timestamp. - - -:::{hint} -Input to the bayesian decoding functions always include the tuning curves computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). +Input to the decoding functions always includes: + - `tuning_curves`, computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). + - `group`, a group of units as a `TsGroup` (spikes), `TsdFrame` (e.g. smoothed rates), or dict of `Ts`/`Tsd`. + - `epochs`, to restrict decoding to certain intervals. + - `bin_size`, for when you pass spikes. + +## Bayesian decoding +Pynapple supports n-dimensional decoding from spikes in the form of Bayesian decoding with a Poisson assumption. +In addition to the default arguments, users can set `uniform_prior=False` to use the occupancy as a prior over the feature distribution. +By default `uniform_prior=True`, and a uniform prior is used. + +:::{important} +Bayesian decoding should only be used with spike or rate data, as these can be assumed to follow a Poisson distribution! 
::: -## 1-dimensional decoding - + +### 1-dimensional Bayesian decoding ```{code-cell} ipython3 :tags: [hide-cell] @@ -66,7 +72,7 @@ tsgroup = nap.TsGroup({i:nap.Ts(timestep[count[:,i]]) for i in range(N)}) epochs = nap.IntervalSet(0, 10) ``` -To decode, we need to compute tuning curves in 1D. +First, we compute the tuning curves: ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( @@ -74,8 +80,6 @@ tuning_curves_1d = nap.compute_tuning_curves( ) ``` -We can display the tuning curve of each neuron: - ```{code-cell} ipython3 :tags: [hide-input] tuning_curves_1d.name = "Firing rate" @@ -84,14 +88,14 @@ tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.show() ``` -`nap.decode_bayes` performs bayesian decoding: +We can then use `nap.decode_bayes` for Bayesian decoding: ```{code-cell} ipython3 decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_1d, # 1D tuning curves - group=tsgroup, # Spiking activity - epochs=epochs, # Small epoch - bin_size=0.06, # How to bin the spike trains + tuning_curves=tuning_curves_1d, + group=tsgroup, + epochs=epochs, + bin_size=0.06, ) ``` @@ -113,7 +117,7 @@ plt.xlabel("Time (s)") plt.show() ``` -## 2-dimensional decoding +### 2-dimensional Bayesian decoding ```{code-cell} ipython3 :tags: [hide-cell] @@ -140,20 +144,19 @@ for i in range(12): ts_group = nap.TsGroup(ts_group, time_support=epochs) ``` -To decode two dimensions, we need to compute tuning curves in 2D: +Decoding also works with multiple dimensions. 
+First, we compute the tuning curves: ```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( - group=ts_group, # Spiking activity of 12 neurons - features=features, # 2-dimensional features + group=ts_group, + features=features, # containing 2 features bins=10, epochs=epochs, - range=[(-1.0, 1.0), (-1.0, 1.0)], # Minmax of the features + range=[(-1.0, 1.0), (-1.0, 1.0)], # range can be specified for each feature ) ``` -We can again display the tuning curve of each neuron: - ```{code-cell} ipython3 :tags: [hide-input] tuning_curves_2d.name = "Firing rate" @@ -162,14 +165,14 @@ tuning_curves_2d.plot(row="unit", col_wrap=6) plt.show() ``` -and `nap.decode_bayes` again performs bayesian decoding: +and then, `nap.decode_bayes` again performs bayesian decoding: ```{code-cell} ipython3 decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_2d, # 2D tuning curves - group=ts_group, # Spiking activity - epochs=epochs, # Epoch - bin_size=0.1, # How to bin the spike trains + tuning_curves=tuning_curves_2d, + group=ts_group, + epochs=epochs, + bin_size=0.1, ) ``` diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 8f6f06f7f..69f4d4813 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -11,7 +11,7 @@ def decode_bayes( - tuning_curves, group, epochs, bin_size, time_units="s", use_occupancy=False + tuning_curves, group, epochs, bin_size, time_units="s", uniform_prior=True ): """ Performs Bayesian decoding over n-dimensional features. @@ -36,9 +36,10 @@ def decode_bayes( Bin size. Default is second. Use the parameter time_units to change it. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). - use_occupancy : bool, optional - If True, uses the occupancy from the tuning curves to adjust the probability distribution. - If False, uses a uniform distribution. Default is False. 
+ uniform_prior : bool, optional + If True (default), uses a uniform distribution as a prior. + If False, uses the occupancy from the tuning curves as a prior over the feature + probability distribution. Returns ------- @@ -221,17 +222,14 @@ def decode_bayes( else: raise TypeError("Unknown format for group.") - if use_occupancy: + if uniform_prior: + occupancy = np.ones_like(tuning_curves[0]).flatten() + else: if "occupancy" not in tuning_curves.attrs: raise ValueError( - "use_occupancy set to True but no occupancy found in tuning curves." + "uniform_prior set to False but no occupancy found in tuning curves." ) - occupancy = tuning_curves.attrs["occupancy"] - if occupancy.shape != tuning_curves.shape[1:]: - raise ValueError("Occupancy shape does not match tuning curves shape.") - occupancy = occupancy.flatten() - else: - occupancy = np.ones_like(tuning_curves[0]).flatten() + occupancy = tuning_curves.attrs["occupancy"].flatten() # Transforming to pure numpy array tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T @@ -253,11 +251,19 @@ def decode_bayes( idxmax = np.argmax(p, 1) p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) - p = getattr(nap, f"Tsd{'Tensor' if p.ndim > 2 else 'Frame'}")( - t=count.index, - d=p, - time_support=epochs, - ) + if p.ndim > 2: + p = nap.TsdTensor( + t=count.index, + d=p, + time_support=epochs, + ) + else: + p = nap.TsdFrame( + t=count.index, + d=p, + time_support=epochs, + columns=tuning_curves.coords[tuning_curves.dims[1]].values, + ) idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) @@ -286,7 +292,7 @@ def decode_bayes( def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): warnings.warn( - "decode_1d is deprecated and will be removed in v1.0; use decode instead.", + "decode_1d is deprecated and will be removed in a future version; use decode instead.", DeprecationWarning, stacklevel=2, ) @@ -315,13 +321,13 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", 
feature=None): ep, bin_size, time_units=time_units, - use_occupancy=feature is not None, + uniform_prior=feature is None, ) def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): warnings.warn( - "decode_2d is deprecated and will be removed in v1.0; use decode instead.", + "decode_2d is deprecated and will be removed in a future version; use decode instead.", DeprecationWarning, stacklevel=2, ) @@ -353,5 +359,5 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N ep, bin_size, time_units=time_units, - use_occupancy=features is not None, + uniform_prior=features is None, ) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 8d3ca10f9..a0093a95f 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -158,33 +158,21 @@ def get_testing_set_n(n_features=1, binned=False): get_testing_set_n(3, binned=True), does_not_raise(), ), - # use_occupancy + # uniform_prior ( { - "use_occupancy": True, + "uniform_prior": False, "tuning_curves": (lambda x: (x.attrs.clear(), x)[1])( get_testing_set_n()["tuning_curves"] ), }, pytest.raises( ValueError, - match="use_occupancy set to True but no occupancy found in tuning curves.", + match="uniform_prior set to False but no occupancy found in tuning curves.", ), ), ( - { - "use_occupancy": True, - "tuning_curves": get_testing_set_n(1)["tuning_curves"].assign_attrs( - {"occupancy": np.array([1, 2, 3])} - ), - }, - pytest.raises( - ValueError, - match="Occupancy shape does not match tuning curves shape.", - ), - ), - ( - {"use_occupancy": True}, + {"uniform_prior": True}, does_not_raise(), ), ], @@ -197,10 +185,10 @@ def test_decode_bayes_type_errors(overwrite_default_args, expectation): nap.decode_bayes(**default_args) -@pytest.mark.parametrize("use_occupancy", [True, False]) +@pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def 
test_decode_bayes(n_features, binned, use_occupancy): +def test_decode_bayes(n_features, binned, uniform_prior): features, tuning_curves, group, epochs, bin_size = get_testing_set_n( n_features, binned=binned ).values() @@ -210,7 +198,7 @@ def test_decode_bayes(n_features, binned, use_occupancy): epochs=epochs, bin_size=bin_size, time_units="s", - use_occupancy=use_occupancy, + uniform_prior=uniform_prior, ) assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) From 9e4c58626104b9a0660a04587514a4d98e9fea3c Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Wed, 23 Jul 2025 17:16:20 -0400 Subject: [PATCH 075/244] check all methods --- scripts/check_parameter_naming.py | 288 +++++++++++++++++++++--------- 1 file changed, 199 insertions(+), 89 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 29c378092..608d3b22d 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -1,142 +1,252 @@ +import ast import difflib import inspect +import itertools import logging +import os +import pathlib import sys import types +from collections import defaultdict +from typing import Dict, List, Optional -import pynapple as nap +# Pairs of parameter names that are lexically similar but intentionally allowed. +# During parameter name similarity checks, some pairs of names may be flagged +# as potentially inconsistent due to their high string similarity. This list +# enumerates such known, acceptable pairs that should be *excluded* from warnings. + +# Each pair is stored as a set of two strings (e.g., {"a", "a_1"}), and comparison +# is done using set equality, i.e., order does not matter. 
+ +# These typically include: +# - semantically equivalent alternatives (e.g., {"conv_time_series", "time_series"}) +# - mirrored structures (e.g., {"inhib_a", "inhib_b"}) +# - systematic naming conventions (e.g., {"basis1", "basis2"}) +# - commonly used argument patterns (e.g., {"args", "kwargs"}) VALID_PAIRS = [ {"ep", "sep"}, {"ts", "tsd"}, {"args", "kwargs"}, {"channel", "n_channels"}, + {"interval_size", "intervalset"}, + {"new_time_support", "time_support"}, + {"ufunc", "func"}, + {"keys", "key"}, + {"value", "values"}, + *({a, b} for (a, b) in itertools.combinations(["starts", "start", "start1", "start2"], r=2)), + *({a, b} for (a, b) in itertools.combinations(["ends", "end", "end1", "end2"], r=2)), + {"windowsize", "window"}, + {"windowsize", "windows"}, + ] -def collect_similar_parameter_names(package, root_name=None, similarity_cutoff=0.8): +def handle_matches( + current_parameter: str, + current_path: str, + matches: List[str], + results: Dict, + valid_pairs: List[set[str]], +): """ - Recursively collect and group similar parameter names from functions and methods. + Handle matched parameter names by updating or creating groups in the results dictionary. + + A parameter is considered valid if it has no matches or if all its matches appear in + `valid_pairs` as a set with the current parameter. Valid parameters are added as new entries + in the results dictionary. Invalid parameters (i.e., those with partial or conflicting matches) + are added to existing groups if any of their matches are already present in those groups. - This function traverses the given package and its submodules, extracting parameter - names from all user-defined functions and methods. Parameter names that are - lexically similar (based on `difflib.get_close_matches`) are grouped together. - This can be used to detect inconsistent naming conventions across a codebase. + Note: This function allows overlapping groups. 
If `current_parameter` is similar to multiple + parameter groups (e.g., "timin" may match both "time" and "timing"), it will be added to each + of the matching groups independently. Parameters ---------- - package : module - The root package to analyze (e.g., `pynapple`). - root_name : str, optional - The dotted name of the root package. If not provided, it is inferred from - `package.__name__`. - similarity_cutoff : float, optional - Similarity threshold between 0 and 1 used to group parameters based on - lexical similarity (default is 0.8). - - Returns - ------- - dict - A dictionary mapping canonical parameter names to a list of tuples. - Each tuple contains: - - The actual parameter name - - The fully qualified function or method path where it appears - - Example - ------- - { - "time": [("time", "pynapple.core.Tsd.__init__"), ("t", "pynapple.io.load")], - ... - } - """ - if root_name is None: - root_name = package.__name__ + current_parameter : + The name of the parameter currently being processed. - results = {} - visited_ids = set() + current_path : + The path or context in which the parameter was found (e.g., a file or data structure path). + + matches : + A list of other parameter names that are similar to ``current_parameter``. - def process_function(func, path): - try: - sig = inspect.signature(func) - param_names = list(sig.parameters) + results : + A dictionary of grouped parameters. Keys are group names, and values are dictionaries + containing: + - "unique_names": a set of parameter names in the group. + - "info": a list of (parameter, path) tuples for matched entries. + + valid_pairs : + A list of valid two-element sets. Each set contains a pair of parameter names that are + considered equivalent or compatible. 
+ + """ + # a parameter name is valid if no matches or all matches in valid pairs + list_invalid = [ + match for match in matches if {match, current_parameter} not in valid_pairs + ] + if len(list_invalid) == 0: + # if all matches are valid, create a new group for this parameter + results[current_parameter] = { + "unique_names": {current_parameter}, + "info": [(current_parameter, current_path)], + } + else: + + # if there is an invalid match, then add to existing result entry + for k, v in results.items(): + # Otherwise, add the parameter to any existing groups where it has a match + # + # Note: We *intentionally allow overlapping groups*. If `current_parameter` + # is similar to multiple different parameter groups + # (e.g. "timin" may be similar to both "time" and "timing", but "time" and "timing" may + # belong to two different groups), + # it will be added to each of those groups. + is_in_category = any(match in v["unique_names"] for match in list_invalid) + if is_in_category: + v["info"].append((current_parameter, current_path)) + v["unique_names"].add(current_parameter) + + +def extract_parameters_from_ast( + tree: ast.Module, + file_path: pathlib.Path, + results: Dict, + valid_pairs: List[set[str]], + unique_param_names: set, + similarity_cutoff: float, +): + + class ParamVisitor(ast.NodeVisitor): + def __init__(self): + self.class_name = None + + def visit_ClassDef(self, node): + prev_class = self.class_name + self.class_name = node.name + self.generic_visit(node) + self.class_name = prev_class + + def visit_FunctionDef(self, node): + qualified_name = ( + f"{self.class_name}.{node.name}" if self.class_name else node.name + ) + param_names = [str(arg.arg) for arg in node.args.args] for par in param_names: + # if perfect match is present just add there if par in results: - results[par].append((par, path)) - continue # exact name already exists store - match = difflib.get_close_matches( - par, results.keys(), n=1, cutoff=similarity_cutoff + 
results[par]["unique_names"].add(par) + results[par]["info"].append( + (par, f"{file_path.as_posix()}:{qualified_name}") + ) + continue + + matches = difflib.get_close_matches( + par, unique_param_names, n=100, cutoff=similarity_cutoff + ) + handle_matches( + par, + f"{file_path.as_posix()}:{qualified_name}", + matches, + results, + valid_pairs, ) - if match and not {match[0], par} in VALID_PAIRS: - results[match[0]].append((par, path)) - else: - results[par] = [(par, path)] - except Exception: - pass # some built-ins or extension modules may not support signature() - - def walk(obj, path_prefix=""): - if id(obj) in visited_ids: - return - visited_ids.add(id(obj)) - - if inspect.isfunction(obj) or inspect.ismethod(obj): - if getattr(obj, "__module__", "").startswith(root_name): - process_function(obj, path_prefix) - - elif inspect.isclass(obj): - if getattr(obj, "__module__", "").startswith(root_name): - for attr in inspect.classify_class_attrs(obj): - # attrs is a convenient named tuple with fields - # (name, kind, defining_class, object) - if attr.kind == "method": - process_function(attr.object, f"{path_prefix}.{attr.name}") - - elif isinstance(obj, types.ModuleType): - if not getattr(obj, "__name__", "").startswith(root_name): - return # external module, skip - for name, member in inspect.getmembers(obj): - if name.startswith("_"): + unique_param_names.add(par) + self.generic_visit(node) + + def visit_AsyncFunctionDef(self, node): + self.visit_FunctionDef(node) + + ParamVisitor().visit(tree) + + +def collect_similar_parameter_names_ast( + root_dir: str | pathlib.Path, + similarity_cutoff: float = 0.8, + valid_pairs: Optional[List[set[str]]] = None, +) -> Dict[str, Dict]: + if valid_pairs is None: + valid_pairs = VALID_PAIRS + + results = {} + unique_param_names = set() + + for dirpath, _, filenames in os.walk(root_dir): + dirpath = pathlib.Path(dirpath) + + if "third_party" in dirpath.parts: + continue + + for filename in filenames: + if 
filename.endswith(".py"): + full_path = dirpath / filename + try: + with open(full_path, "r", encoding="utf-8") as f: + source = f.read() + tree = ast.parse(source, filename=full_path) + extract_parameters_from_ast( + tree, + full_path, + results, + valid_pairs, + unique_param_names, + similarity_cutoff, + ) + except (UnicodeDecodeError, FileNotFoundError): continue - walk(member, f"{path_prefix}.{name}") - walk(package, package.__name__) return results if __name__ == "__main__": import argparse + import logging + import sys + + default_path = pathlib.Path(__file__).parent.parent / "pynapple" parser = argparse.ArgumentParser( - description="Check for inconsistent parameter naming." + description="Check parameter naming consistency using AST." + ) + parser.add_argument( + "--path", + "-p", + type=pathlib.Path, + help="Root path to the package (source folder).", + default=default_path, ) parser.add_argument( "--threshold", "-t", type=float, default=0.8, - help="Similarity threshold (between 0 and 1) for grouping parameter names (default: 0.9)", + help="Similarity threshold for parameter name grouping.", ) args = parser.parse_args() logger = logging.getLogger("check_parameter_naming") logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") - params = collect_similar_parameter_names(nap, similarity_cutoff=args.threshold) - - # Filter out parameter names that occur only once - for name, occurrences in list(params.items()): - if all(o == name for o, _ in occurrences): - params.pop(name) + params = collect_similar_parameter_names_ast( + args.path, similarity_cutoff=args.threshold + ) + invalid = [name for name, d in params.items() if len(d["unique_names"]) > 1] - if params: + if invalid: msg_lines = ["Inconsistency in parameter naming found!\n"] - for name, occurrences in params.items(): + for name in invalid: msg_lines.append(f"{name}:\n") - for param_name, path in occurrences: - msg_lines.append(f"\t- {path}: {param_name}\n") + grouped_info = 
defaultdict(list) + for param_name, path in sorted(params[name]["info"], key=lambda x: x[1]): + grouped_info[param_name].append(path) + for param_name in sorted(params[name]["unique_names"]): + msg_lines.append(f"\t- {param_name}:\n") + for path in grouped_info[param_name]: + msg_lines.append(f"\t\t- {path}\n") msg_lines.append("\n") - logger.warning("".join(msg_lines)) - # TODO: change this sys.exit(1) to fail the CI - sys.exit(0) + sys.exit(1) else: logger.info("No parameter naming inconsistencies found.") From 57fd49a699d054f39a9191ab42c6bbb145b27fbb Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Wed, 23 Jul 2025 17:19:12 -0400 Subject: [PATCH 076/244] log as error --- scripts/check_parameter_naming.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 608d3b22d..483ce2b32 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -38,7 +38,6 @@ *({a, b} for (a, b) in itertools.combinations(["ends", "end", "end1", "end2"], r=2)), {"windowsize", "window"}, {"windowsize", "windows"}, - ] @@ -246,7 +245,7 @@ def collect_similar_parameter_names_ast( for path in grouped_info[param_name]: msg_lines.append(f"\t\t- {path}\n") msg_lines.append("\n") - logger.warning("".join(msg_lines)) + logger.error("".join(msg_lines)) sys.exit(1) else: logger.info("No parameter naming inconsistencies found.") From fe70067cc23d617ba9f70378f00da021d53426b0 Mon Sep 17 00:00:00 2001 From: BalzaniEdoardo Date: Wed, 23 Jul 2025 17:30:36 -0400 Subject: [PATCH 077/244] fix par naming --- scripts/check_parameter_naming.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/check_parameter_naming.py b/scripts/check_parameter_naming.py index 483ce2b32..a99d12fab 100644 --- a/scripts/check_parameter_naming.py +++ b/scripts/check_parameter_naming.py @@ -1,12 +1,8 @@ import ast import difflib -import inspect import itertools -import logging import os import 
pathlib -import sys -import types from collections import defaultdict from typing import Dict, List, Optional From b0c1ac685c368d4a79137933dcfc712c2560890b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 21:16:48 +0000 Subject: [PATCH 078/244] better decoding plots --- doc/user_guide/07_decoding.md | 105 ++++++++++++++++++++-------------- 1 file changed, 63 insertions(+), 42 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 5afbdfea6..69679ddf9 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -104,16 +104,29 @@ decoded, proba_feature = nap.decode_bayes( ```{code-cell} ipython3 :tags: [hide-input] -plt.figure(figsize=(12, 6)) -plt.subplot(211) -plt.plot(feature.restrict(epochs), label="True") -plt.plot(decoded, label="Decoded") -plt.legend() -plt.xlim(epochs[0,0], epochs[0,1]) -plt.subplot(212) -plt.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") -plt.xticks([0, len(decoded)], epochs.values[0]) -plt.xlabel("Time (s)") +fig, (ax1, ax2) = plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) +ax1.plot( + np.linspace(0, len(decoded), len(feature.restrict(epochs))), + feature.restrict(epochs), + label="True", +) +ax1.scatter( + np.linspace(0, len(decoded), len(decoded)), + decoded, + label="Decoded", + c="orange", +) +ax1.legend( + frameon=False, + bbox_to_anchor=(1.0, 1.0), +) +ax1.set_xlim(epochs[0, 0], epochs[0, 1]) +im = ax2.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") +cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) +fig.colorbar(im, cax=cbar_ax, label="Probability") +ax2.set_xticks([0, len(decoded)], epochs.values[0]) +ax2.set_yticks([]) +ax2.set_xlabel("Time (s)", labelpad=-20) plt.show() ``` @@ -124,7 +137,8 @@ plt.show() dt = 0.1 epochs = nap.IntervalSet(start=0, end=1000, time_units="s") features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T -features = 
nap.TsdFrame(t=np.arange(0, 1000, dt), +features = nap.TsdFrame( + t=np.arange(0, 1000, dt), d=features, time_units="s", time_support=epochs, @@ -134,14 +148,16 @@ features = nap.TsdFrame(t=np.arange(0, 1000, dt), times = features.as_units("us").index.values ft = features.values alpha = np.arctan2(ft[:, 1], ft[:, 0]) -bins = np.repeat(np.linspace(-np.pi, np.pi, 13)[::, np.newaxis], 2, 1) -bins += np.array([-2 * np.pi / 24, 2 * np.pi / 24]) +bin_centers = np.linspace(-np.pi, np.pi, 12) +kappa = 4.0 ts_group = {} -for i in range(12): - ts = times[(alpha >= bins[i, 0]) & (alpha <= bins[i + 1, 1])] +for i, mu in enumerate(bin_centers): + weights = np.exp(kappa * np.cos(alpha - mu)) # wrapped Gaussian + weights /= np.max(weights) # normalize to 0–1 + mask = weights > 0.5 + ts = times[mask] ts_group[i] = nap.Ts(ts, time_units="us") - -ts_group = nap.TsGroup(ts_group, time_support=epochs) +ts_group = nap.TsGroup(ts_group) ``` Decoding also works with multiple dimensions. @@ -172,39 +188,44 @@ decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves_2d, group=ts_group, epochs=epochs, - bin_size=0.1, + bin_size=0.2, ) ``` ```{code-cell} ipython3 :tags: [hide-input] -plt.figure(figsize=(15, 5)) -plt.subplot(131) -plt.plot(features["a"].get(0,20), label="True") -plt.plot(decoded["a"].get(0,20), label="Decoded") -plt.legend() -plt.xlabel("Time (s)") -plt.ylabel("Feature a") -plt.subplot(132) -plt.plot(features["b"].get(0,20), label="True") -plt.plot(decoded["b"].get(0,20), label="Decoded") -plt.legend() -plt.xlabel("Time (s)") -plt.title("Feature b") -plt.subplot(133) -plt.plot( - features["a"].get(0,20), - features["b"].get(0,20), +fig, (ax1, ax2, ax3) = plt.subplots(figsize=(8, 3), nrows=1, ncols=3, sharey=True) +ax1.plot(features["a"].get(0, 20), label="True") +ax1.scatter( + decoded["a"].get(0, 20).times(), + decoded["a"].get(0, 20), + label="Decoded", + c="orange", +) +ax1.set_title("Feature a") +ax1.set_xlabel("Time (s)") + +ax2.plot(features["b"].get(0, 
20), label="True") +ax2.scatter( + decoded["b"].get(0, 20).times(), + decoded["b"].get(0, 20), + label="Decoded", + c="orange", +) +ax2.set_xlabel("Time (s)") +ax2.set_title("Feature b") + +ax3.plot( + features["a"].get(0, 20), + features["b"].get(0, 20), label="True", ) -plt.plot( - decoded["a"].get(0,20), - decoded["b"].get(0,20), +ax3.scatter( + decoded["a"].get(0, 20), + decoded["b"].get(0, 20), label="Decoded", + c="orange", ) -plt.xlabel("Feature a") -plt.title("Feature b") -plt.legend() -plt.tight_layout() +ax3.set_title("Combined") plt.show() ``` From 0e8bfce1eb503e07d2942b3d749470545f1e285d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 21:32:56 +0000 Subject: [PATCH 079/244] group -> data --- doc/examples/tutorial_HD_dataset.md | 2 +- doc/examples/tutorial_phase_preferences.md | 2 +- doc/user_guide/03_metadata.md | 4 +- doc/user_guide/06_tuning_curves.md | 16 +++---- doc/user_guide/07_decoding.md | 2 +- pynapple/process/tuning_curves.py | 50 +++++++++++----------- tests/test_decoding.py | 4 +- tests/test_metadata.py | 2 +- tests/test_tuning_curves.py | 16 +++---- 9 files changed, 49 insertions(+), 49 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index c1b59f830..304b7a30e 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -104,7 +104,7 @@ Let's plot firing rate of ADn units as a function of heading direction, i.e. a h ```{code-cell} ipython3 tuning_curves = nap.compute_tuning_curves( - group=spikes_adn, + data=spikes_adn, features=angle, bins=61, epochs=epochs[epochs.tags == "wake"], diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md index 7ee21dc95..40f3c98e0 100644 --- a/doc/examples/tutorial_phase_preferences.md +++ b/doc/examples/tutorial_phase_preferences.md @@ -211,7 +211,7 @@ The feature is the theta phase during REM sleep. 
```{code-cell} ipython3 phase_modulation = nap.compute_tuning_curves( - group=spikes, + data=spikes, features=theta_phase, bins=61, range=(-np.pi, np.pi), diff --git a/doc/user_guide/03_metadata.md b/doc/user_guide/03_metadata.md index cc1cbf83a..4df5beee8 100644 --- a/doc/user_guide/03_metadata.md +++ b/doc/user_guide/03_metadata.md @@ -271,7 +271,7 @@ An anonymous function can also be used to apply a function where the grouped obj ```{code-cell} ipython3 func = lambda x: nap.compute_tuning_curves( - group=tsgroup, + data=tsgroup, features=feature, bins=2, epochs=x) @@ -285,7 +285,7 @@ intervalset.groupby_apply( "choice", nap.compute_tuning_curves, input_key="epochs", - group=tsgroup, + data=tsgroup, features=feature, bins=2) ``` diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 38736cb27..bb1b09a1e 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -73,8 +73,8 @@ tsgroup = nap.TsGroup( ``` When computing from general time-series, mandatory arguments are: -* a `TsGroup`, `Tsd`, or `TsdFrame` containing the neural activity of one or more units. -* a `Tsd` or `TsdFrame` containing one or more features. +* `data`: a `TsGroup`, `Tsd`, or `TsdFrame` containing the neural activity of one or more units. +* `features`: a `Tsd` or `TsdFrame` containing one or more features. By default, 10 bins are used for all features, but you can specify the number of bins, or the bin edges explicitly, using the `bins` argument. @@ -101,7 +101,7 @@ you can set `return_pandas=True`. 
Note that this will not return the occupancy a ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( - group=tsgroup, + data=tsgroup, features=feature, bins=120, range=(0, 2*np.pi), @@ -186,7 +186,7 @@ tsgroup = nap.TsGroup({ If you pass more than 1 feature, a multi-dimensional tuning curve is computed: ```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( - group=tsgroup, + data=tsgroup, features=features, bins=(5,5), range=[(-1, 1), (-1, 1)], @@ -275,7 +275,7 @@ In that case, we can simply pass a `Tsd` or `TsdFrame` as group: ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( - group=tsdframe, + data=tsdframe, features=feature, bins=120, range=(0, 2*np.pi), @@ -319,7 +319,7 @@ tsdframe = nap.TsdFrame( ```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( - group=tsdframe, + data=tsdframe, features=features, bins=5, feature_names=["a", "b"] @@ -340,8 +340,8 @@ When computing from epochs, you should store them in a dictionary: ```{code-cell} ipython3 dict_ep = { - "stim0": nap.IntervalSet(start=0, end=20), - "stim1":nap.IntervalSet(start=30, end=70) + "stim0": nap.IntervalSet(start=0, end=20), + "stim1":nap.IntervalSet(start=30, end=70) } ``` diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 58bffd738..b08a0627c 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -144,7 +144,7 @@ To decode, we need to compute tuning curves in 2D. 
```{code-cell} ipython3 tuning_curves_2d = nap.compute_tuning_curves( - group=ts_group, # Spiking activity of 12 neurons + data=ts_group, # Spiking activity of 12 neurons features=features, # 2-dimensional features bins=10, epochs=epochs, diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 3a6738b21..30b87e041 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -82,7 +82,7 @@ def wrapper(*args, **kwargs): def compute_tuning_curves( - group, + data, features, bins=10, range=None, @@ -96,8 +96,8 @@ def compute_tuning_curves( Parameters ---------- - group : TsGroup, TsdFrame or dict of Ts, Tsd - The group of Ts or Tsd for which the tuning curves will be computed + data : TsGroup, TsdFrame or dict of Ts, Tsd + The data for which the tuning curves will be computed. features : Tsd, TsdFrame The features (i.e. one column per feature). bins : sequence or int @@ -152,7 +152,7 @@ def compute_tuning_curves( Coordinates: * unit (unit) int64 16B 1 2 * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 - Attributes: + Attributes: occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... @@ -187,7 +187,7 @@ def compute_tuning_curves( * unit (unit) int64 16B 1 2 * 0 (0) float64 40B 0.09 0.27 0.45 0.63 0.81 * 1 (1) float64 24B 0.3167 0.95 1.583 - Attributes: + Attributes: occupancy: [[100. 100. nan]\\n [100. 50. 50.]\\n [100. nan 100.]\\n [ 5... bin_edges: [array([0. , 0.18, 0.36, 0.54, 0.72, 0.9 ]), array([0. ... @@ -213,14 +213,14 @@ def compute_tuning_curves( * unit (unit) int64 16B 1 2 * 0 (0) float64 32B 0.125 0.375 0.625 0.875 * 1 (1) float64 16B 0.5 1.5 - Attributes: + Attributes: occupancy: [[150. 150.]\\n [100. 100.]\\n [150. 150.]\\n [100. 100.]] bin_edges: [array([0. , 0.25, 0.5 , 0.75, 1. 
]), array([0., 1., 2.])] In all of these cases, it is also possible to pass continuous values instead of spikes (e.g. calcium imaging data): - >>> group = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) - >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) + >>> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) + >>> tcs = nap.compute_tuning_curves(frame, feature, bins=10) >>> tcs Size: 240B array([[0.49147343, 0.50190395, 0.50971339, 0.50128013, 0.54332711, @@ -232,22 +232,22 @@ def compute_tuning_curves( Coordinates: * unit (unit) int64 24B 0 1 2 * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 - Attributes: + Attributes: occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... """ - # check group - if isinstance(group, dict): - group = nap.TsGroup(group) - if isinstance(group, nap.Tsd): - group = nap.TsdFrame( - d=group.values, - t=group.times(), - time_support=group.time_support, + # check data + if isinstance(data, dict): + data = nap.TsGroup(data) + if isinstance(data, nap.Tsd): + data = nap.TsdFrame( + d=data.values, + t=data.times(), + time_support=data.time_support, ) - elif not isinstance(group, (nap.TsGroup, nap.TsdFrame)): - raise TypeError("group should be a Tsd, TsdFrame, TsGroup, or dict.") + elif not isinstance(data, (nap.TsGroup, nap.TsdFrame)): + raise TypeError("data should be a Tsd, TsdFrame, TsGroup, or dict.") # check features if isinstance(features, nap.Tsd): @@ -279,7 +279,7 @@ def compute_tuning_curves( features = features.restrict(epochs) else: raise TypeError("epochs should be an IntervalSet.") - group = group.restrict(epochs) + data = data.restrict(epochs) # check fs if fs is None: @@ -304,26 +304,26 @@ def compute_tuning_curves( occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) # tunning curves - keys = group.keys() if isinstance(group, nap.TsGroup) else 
group.columns + keys = data.keys() if isinstance(data, nap.TsGroup) else data.columns tcs = np.zeros([len(keys), *occupancy.shape]) - if isinstance(group, nap.TsGroup): + if isinstance(data, nap.TsGroup): # SPIKES for i, n in enumerate(keys): tcs[i] = np.histogramdd( - group[n].value_from(features, epochs), + data[n].value_from(features, epochs), bins=bin_edges, )[0] occupancy[occupancy == 0.0] = np.nan tcs = (tcs / occupancy) * fs else: # RATES - values = group.value_from(features, epochs) + values = data.value_from(features, epochs) counts = np.histogramdd(values, bins=bin_edges)[0] counts[counts == 0] = np.nan for i, n in enumerate(keys): tcs[i] = np.histogramdd( values, - weights=group.values[:, i], + weights=data.values[:, i], bins=bin_edges, )[0] tcs /= counts diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 421ac4e52..b2056f138 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -19,7 +19,7 @@ def get_testing_set_1d(): group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) tc = ( nap.compute_tuning_curves( - group=group, features=feature, bins=2, range=(-0.5, 1.5) + data=group, features=feature, bins=2, range=(-0.5, 1.5) ) .to_pandas() .T @@ -136,7 +136,7 @@ def get_testing_set_2d(): ) tc = nap.compute_tuning_curves( - group=group, features=features, bins=2, range=[(-0.5, 1.5), (-0.5, 1.5)] + data=group, features=features, bins=2, range=[(-0.5, 1.5), (-0.5, 1.5)] ) xy = [tc.coords[dim].values for dim in tc.coords if dim != "unit"] tc = {c: tc.sel(unit=c).values for c in tc.coords["unit"].values} diff --git a/tests/test_metadata.py b/tests/test_metadata.py index 1ac59f08c..8d8db8555 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -2388,7 +2388,7 @@ def test_metadata_groupby_apply_tuning_curves(self, tsgroup_gba, iset_gba): "label", nap.compute_tuning_curves, "epochs", - group=tsgroup_gba, + data=tsgroup_gba, features=feature, bins=5, ) diff --git a/tests/test_tuning_curves.py 
b/tests/test_tuning_curves.py index 09a66c0ce..4cecbd99d 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -27,7 +27,7 @@ def get_features_n(n, fs=10.0): @pytest.mark.parametrize( - "group, features, kwargs, expectation", + "data, features, kwargs, expectation", [ # group ( @@ -35,7 +35,7 @@ def get_features_n(n, fs=10.0): get_features_n(1), {}, pytest.raises( - TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." + TypeError, match="data should be a Tsd, TsdFrame, TsGroup, or dict." ), ), ( @@ -43,7 +43,7 @@ def get_features_n(n, fs=10.0): get_features_n(1), {}, pytest.raises( - TypeError, match="group should be a Tsd, TsdFrame, TsGroup, or dict." + TypeError, match="data should be a Tsd, TsdFrame, TsGroup, or dict." ), ), (get_group_n(1), get_features_n(1), {}, does_not_raise()), @@ -293,13 +293,13 @@ def get_features_n(n, fs=10.0): ), ], ) -def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation): +def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): with expectation: - nap.compute_tuning_curves(group, features, **kwargs) + nap.compute_tuning_curves(data, features, **kwargs) @pytest.mark.parametrize( - "group, features, kwargs, expected", + "data, features, kwargs, expected", [ # single rate unit, single feature ( @@ -609,8 +609,8 @@ def test_compute_tuning_curves_type_errors(group, features, kwargs, expectation) ), ], ) -def test_compute_tuning_curves(group, features, kwargs, expected): - tcs = nap.compute_tuning_curves(group, features, **kwargs) +def test_compute_tuning_curves(data, features, kwargs, expected): + tcs = nap.compute_tuning_curves(data, features, **kwargs) if isinstance(expected, pd.DataFrame): pd.testing.assert_frame_equal(tcs, expected) else: From a38f080ddb05c573351ea2af49fb8790d4c317bb Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 21:50:41 +0000 Subject: [PATCH 080/244] add deprecated docstrings --- pynapple/process/decoding.py 
| 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index f04b93174..605d14786 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -291,6 +291,9 @@ def decode_bayes( def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): + """ + Deprecated, use `decode` instead. + """ warnings.warn( "decode_1d is deprecated and will be removed in a future version; use decode instead.", DeprecationWarning, @@ -326,6 +329,9 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): + """ + Deprecated, use `decode` instead. + """ warnings.warn( "decode_2d is deprecated and will be removed in a future version; use decode instead.", DeprecationWarning, From 76c28dc3a9a154fc06b181f41246819505be09e8 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 21:52:06 +0000 Subject: [PATCH 081/244] deprecated messages --- pynapple/process/tuning_curves.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 30b87e041..168d4127d 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -349,6 +349,9 @@ def compute_tuning_curves( @_validate_tuning_inputs def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ warnings.warn( "compute_1d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", @@ -372,6 +375,9 @@ def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): def compute_1d_tuning_curves_continuous( tsdframe, feature, nb_bins, ep=None, minmax=None ): + """ + Deprecated, use `compute_tuning_curves` instead. 
+ """ warnings.warn( "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", @@ -393,6 +399,9 @@ def compute_1d_tuning_curves_continuous( @_validate_tuning_inputs def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ warnings.warn( "compute_2d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", @@ -417,6 +426,9 @@ def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): def compute_2d_tuning_curves_continuous( tsdframe, features, nb_bins, ep=None, minmax=None ): + """ + Deprecated, use `compute_tuning_curves` instead. + """ warnings.warn( "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", From 1829faba46586f0a18407f5fcb445a7c8e0ffae1 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 22:19:00 +0000 Subject: [PATCH 082/244] more interesting example tuning curves --- doc/user_guide/06_tuning_curves.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index bb1b09a1e..94f620fd3 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -311,10 +311,16 @@ features = nap.TsdFrame( # Calcium activity -tsdframe = nap.TsdFrame( - t=timestep, - d=np.random.randn(len(timestep), 2) - ) +ft = features.values +alpha = np.arctan2(ft[:, 1], ft[:, 0]) +bin_centers = np.linspace(-np.pi, np.pi, 6) +kappa = 4.0 +units=[] +for i, mu in enumerate(bin_centers): + units.append(np.exp(kappa * np.cos(alpha - mu))) # wrapped Gaussian +units = np.stack(units, axis=1) +units = (units-np.mean(units, axis=0)) / np.std(units, axis=0) +tsdframe = nap.TsdFrame(t=features.times(), d=units) ``` ```{code-cell} ipython3 @@ -330,7 +336,7 
@@ tuning_curves_2d ```{code-cell} ipython3 tuning_curves_2d.name="ΔF/F" tuning_curves_2d.attrs["unit"]="a.u." -tuning_curves_2d.plot(col="unit") +tuning_curves_2d.plot(col="unit", col_wrap=3) plt.show() ``` From da1c621bacf619cf4e508661b1a8daa1100e59db Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 22:24:10 +0000 Subject: [PATCH 083/244] remove test file --- test.py | 148 -------------------------------------------------------- 1 file changed, 148 deletions(-) delete mode 100644 test.py diff --git a/test.py b/test.py deleted file mode 100644 index 3d64895d1..000000000 --- a/test.py +++ /dev/null @@ -1,148 +0,0 @@ -import pynapple as nap -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -import seaborn as sns - -custom_params = {"axes.spines.right": False, "axes.spines.top": False} -sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) - -from scipy.ndimage import gaussian_filter1d - -# Fake Tuning curves -N = 6 # Number of neurons -bins = np.linspace(0, 2 * np.pi, 61) -x = np.linspace(-np.pi, np.pi, len(bins) - 1) -tmp = np.roll(np.exp(-((1.5 * x) ** 2)), (len(bins) - 1) // 2) -tc = np.array([np.roll(tmp, i * (len(bins) - 1) // N) for i in range(N)]).T - -tc_1d = pd.DataFrame(index=bins[0:-1], data=tc) - -# Feature -T = 10000 -dt = 0.01 -timestep = np.arange(0, T) * dt -feature = nap.Tsd( - t=timestep, - d=gaussian_filter1d(np.cumsum(np.random.randn(T) * 0.5), 20) % (2 * np.pi), -) -index = np.digitize(feature, bins) - 1 - -# Spiking activity - -count = np.random.poisson(tc[index]) > 0 -tsgroup = nap.TsGroup({i: nap.Ts(timestep[count[:, i]]) for i in range(N)}) -epochs = nap.IntervalSet(5, 10) - - -tuning_curves_1d = nap.compute_tuning_curves( - tsgroup, feature, bins=61, range=(0, 2 * np.pi), feature_names=["feature"] -) - -decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_1d, - group=tsgroup, - epochs=epochs, - bin_size=0.06, -) - -fig, (ax1, ax2) = 
plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) -ax1.plot( - np.linspace(0, len(decoded), len(feature.restrict(epochs))), - feature.restrict(epochs), - label="True", -) -ax1.scatter( - np.linspace(0, len(decoded), len(decoded)), - decoded, - label="Decoded", - c="orange", -) -ax1.legend( - frameon=False, - bbox_to_anchor=(1.0, 1.0), -) -ax1.set_xlim(epochs[0, 0], epochs[0, 1]) -im = ax2.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") -cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) -fig.colorbar(im, cax=cbar_ax, label="Probability") -ax2.set_xticks([0, len(decoded)], epochs.values[0]) -ax2.set_yticks([]) -ax2.set_xlabel("Time (s)", labelpad=-20) -plt.savefig("decode_bayes_1d.pdf", dpi=300, bbox_inches="tight") -plt.close() - -dt = 0.1 -epochs = nap.IntervalSet(start=0, end=1000, time_units="s") -features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T -features = nap.TsdFrame( - t=np.arange(0, 1000, dt), - d=features, - time_units="s", - time_support=epochs, - columns=["a", "b"], -) - -times = features.as_units("us").index.values -ft = features.values -alpha = np.arctan2(ft[:, 1], ft[:, 0]) -bin_centers = np.linspace(-np.pi, np.pi, 12) -kappa = 4.0 -ts_group = {} -for i, mu in enumerate(bin_centers): - weights = np.exp(kappa * np.cos(alpha - mu)) # wrapped Gaussian - weights /= np.max(weights) # normalize to 0–1 - mask = weights > 0.5 - ts = times[mask] - ts_group[i] = nap.Ts(ts, time_units="us") -ts_group = nap.TsGroup(ts_group) - -tuning_curves_2d = nap.compute_tuning_curves( - group=ts_group, - features=features, # containing 2 features - bins=10, - epochs=epochs, - range=[(-1.0, 1.0), (-1.0, 1.0)], # range can be specified for each feature -) - -decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_2d, - group=ts_group, - epochs=ts_group.time_support, - bin_size=0.2, -) - -fig, (ax1, ax2, ax3) = plt.subplots(figsize=(8, 3), nrows=1, ncols=3, sharey=True) 
-ax1.plot(features["a"].get(0, 20), label="True") -ax1.scatter( - decoded["a"].get(0, 20).times(), - decoded["a"].get(0, 20), - label="Decoded", - c="orange", -) -ax1.set_title("Feature a") -ax1.set_xlabel("Time (s)") - -ax2.plot(features["b"].get(0, 20), label="True") -ax2.scatter( - decoded["b"].get(0, 20).times(), - decoded["b"].get(0, 20), - label="Decoded", - c="orange", -) -ax2.set_xlabel("Time (s)") -ax2.set_title("Feature b") - -ax3.plot( - features["a"].get(0, 20), - features["b"].get(0, 20), - label="True", -) -ax3.scatter( - decoded["a"].get(0, 20), - decoded["b"].get(0, 20), - label="Decoded", - c="orange", -) -ax3.set_title("Combined") -plt.savefig("decode_template_2d.pdf", dpi=300, bbox_inches="tight") From 7dfc4baf3fd996a0ef5c0e262b0c1eaeac2fd611 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 24 Jul 2025 22:26:29 +0000 Subject: [PATCH 084/244] more interesting example tuning curves --- doc/user_guide/06_tuning_curves.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index bb1b09a1e..da49f37e3 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -311,10 +311,15 @@ features = nap.TsdFrame( # Calcium activity -tsdframe = nap.TsdFrame( - t=timestep, - d=np.random.randn(len(timestep), 2) - ) +ft = features.values +alpha = np.arctan2(ft[:, 1], ft[:, 0]) +bin_centers = np.linspace(-np.pi, np.pi, 6) +kappa = 4.0 +units=[] +for i, mu in enumerate(bin_centers): + units.append(np.exp(kappa * np.cos(alpha - mu))) # wrapped Gaussian +units = np.stack(units, axis=1) +tsdframe = nap.TsdFrame(t=features.times(), d=units) ``` ```{code-cell} ipython3 @@ -330,7 +335,7 @@ tuning_curves_2d ```{code-cell} ipython3 tuning_curves_2d.name="ΔF/F" tuning_curves_2d.attrs["unit"]="a.u." 
-tuning_curves_2d.plot(col="unit") +tuning_curves_2d.plot(col="unit", col_wrap=3) plt.show() ``` From beed9ec703795b35986fd3c7f1e7e96e09ca7193 Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Fri, 25 Jul 2025 11:35:26 -0400 Subject: [PATCH 085/244] fix metadata doc examples --- pynapple/core/interval_set.py | 18 ++++++++++++------ pynapple/core/ts_group.py | 16 +++++++++++----- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/pynapple/core/interval_set.py b/pynapple/core/interval_set.py index 5bdbaf3e1..45b4c1bec 100644 --- a/pynapple/core/interval_set.py +++ b/pynapple/core/interval_set.py @@ -1289,23 +1289,29 @@ def groupby_apply(self, by, func, input_key=None, **func_kwargs): ... ) >>> feature = nap.Tsd(t=np.arange(40), d=np.concatenate([np.zeros(20), np.ones(20)])) >>> func_kwargs = { - ... "group": tsg, + ... "data": tsg, ... "features": feature, ... "bins": 2, ... } >>> ep.groupby_apply("l2", nap.compute_tuning_curves, input_key="epochs", **func_kwargs) - {'x': Size: 48B + {'x': Size: 48B array([[ nan, 1. ], [ nan, 1.77777778], [ nan, 4.11111111]]) Coordinates: - * unit (unit) int64 24B 1 2 3 - * feature0 (feature0) float64 16B -0.25 0.25, 'y': Size: 48B + * unit (unit) int64 24B 1 2 3 + * 0 (0) float64 16B -0.25 0.25 + Attributes: + occupancy: [nan 9.] + bin_edges: [array([-0.5, 0. , 0.5])], 'y': Size: 48B array([[ nan, 1. ], [ nan, 1.92857143], [ nan, 4.71428571]]) Coordinates: - * unit (unit) int64 24B 1 2 3 - * feature0 (feature0) float64 16B 0.75 1.25} + * unit (unit) int64 24B 1 2 3 + * 0 (0) float64 16B 0.75 1.25 + Attributes: + occupancy: [nan 14.] + bin_edges: [array([0.5, 1. , 1.5])]} """ return _MetadataMixin.groupby_apply(self, by, func, input_key, **func_kwargs) diff --git a/pynapple/core/ts_group.py b/pynapple/core/ts_group.py index a4c77c6b6..9b9721cef 100644 --- a/pynapple/core/ts_group.py +++ b/pynapple/core/ts_group.py @@ -1926,15 +1926,21 @@ def groupby_apply(self, by, func, input_key=None, **func_kwargs): ... 
time_support=nap.IntervalSet(np.array([[0, 5], [10, 12], [20, 33]])), ... ) >>> print(tsgroup.groupby_apply("l2", nap.compute_tuning_curves, features=feature, bins=2)) - {'x': Size: 32B + {'x': Size: 32B array([[1. , 1. ], [1.77777778, 1.92857143]]) Coordinates: - * unit (unit) int64 16B 0 1 - * feature0 (feature0) float64 16B 0.25 0.75, 'y': Size: 16B + * unit (unit) int64 16B 0 1 + * 0 (0) float64 16B 0.25 0.75 + Attributes: + occupancy: [ 9. 14.] + bin_edges: [array([0. , 0.5, 1. ])], 'y': Size: 16B array([[3.33333333, 3.78571429]]) Coordinates: - * unit (unit) int64 8B 2 - * feature0 (feature0) float64 16B 0.25 0.75} + * unit (unit) int64 8B 2 + * 0 (0) float64 16B 0.25 0.75 + Attributes: + occupancy: [ 9. 14.] + bin_edges: [array([0. , 0.5, 1. ])]} """ return _MetadataMixin.groupby_apply(self, by, func, input_key, **func_kwargs) From f3e0d9c3d80289eeb54e39d86a838a4751884860 Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Fri, 25 Jul 2025 16:21:30 +0000 Subject: [PATCH 086/244] Update doc/user_guide/07_decoding.md Co-authored-by: Sarah Jo Venditto --- doc/user_guide/07_decoding.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 370e7d1b3..e6e9fe860 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -28,7 +28,8 @@ Input to the decoding functions always includes: - `tuning_curves`, computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). - `data`, neural activity as a `TsGroup` (spikes), `TsdFrame` (e.g. smoothed rates), or dict of `Ts`/`Tsd`. - `epochs`, to restrict decoding to certain intervals. - - `bin_size`, for when you pass spikes. + - `bin_size`, the size of the bins in which to count the data (spikes only). + - `time_units`, the units of `bin_size`, defaulting to seconds. 
## Bayesian decoding Pynapple supports n-dimensional decoding from spikes in the form of Bayesian decoding with a Poisson assumption. From 1d67f829aebcf112327ead8be4c04a5460aaabe9 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 16:27:16 +0000 Subject: [PATCH 087/244] docstring group -> data --- pynapple/process/decoding.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 605d14786..551eb3a3e 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -55,11 +55,11 @@ def decode_bayes( >>> import pynapple as nap >>> import numpy as np - >>> group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) + >>> data = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) >>> feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) - >>> tuning_curves = nap.compute_tuning_curves(group, feature, bins=2, range=(-.5, 1.5)) + >>> tuning_curves = nap.compute_tuning_curves(data, feature, bins=2, range=(-.5, 1.5)) >>> epochs = nap.IntervalSet([0, 100]) - >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, p = nap.decode_bayes(tuning_curves, data, epochs=epochs, bin_size=1) >>> decoded Time (s) ---------- -- @@ -110,7 +110,7 @@ def decode_bayes( ... t=np.arange(0, 100, 1), ... d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, ... ) - >>> group = nap.TsGroup( + >>> data = nap.TsGroup( ... { ... 0: nap.Ts(np.arange(0, 50, 2)), ... 1: nap.Ts(np.arange(1, 51, 2)), @@ -118,8 +118,8 @@ def decode_bayes( ... 3: nap.Ts(np.arange(51, 101, 2)), ... } ... 
) - >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) - >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> tuning_curves = nap.compute_tuning_curves(data, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, data, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- @@ -166,9 +166,9 @@ def decode_bayes( It is also possible to pass continuous values instead of spikes (e.g. smoothed spike counts): - >>> frame = group.count(1).smooth(2) - >>> tuning_curves = nap.compute_tuning_curves(frame, features, bins=2, range=[(-.5, 1.5)]*2) - >>> decoded, p = nap.decode_bayes(tuning_curves, frame, epochs=epochs, bin_size=1) + >>> data = data.count(1).smooth(2) + >>> tuning_curves = nap.compute_tuning_curves(data, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, data, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- @@ -295,7 +295,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): Deprecated, use `decode` instead. """ warnings.warn( - "decode_1d is deprecated and will be removed in a future version; use decode instead.", + "decode_1d is deprecated and will be removed in a future version; use decode_bayes instead.", DeprecationWarning, stacklevel=2, ) @@ -333,7 +333,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N Deprecated, use `decode` instead. 
""" warnings.warn( - "decode_2d is deprecated and will be removed in a future version; use decode instead.", + "decode_2d is deprecated and will be removed in a future version; use decode_bayes instead.", DeprecationWarning, stacklevel=2, ) From d81ce80f11d9836c2d104f09e6ed4a2b992332c5 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 18:33:32 +0000 Subject: [PATCH 088/244] change data input possibilities --- doc/user_guide/06_tuning_curves.md | 2 +- pynapple/process/tuning_curves.py | 40 ++++++++++++++---------------- tests/test_tuning_curves.py | 22 ++++++++-------- 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index da49f37e3..f15857f34 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -73,7 +73,7 @@ tsgroup = nap.TsGroup( ``` When computing from general time-series, mandatory arguments are: -* `data`: a `TsGroup`, `Tsd`, or `TsdFrame` containing the neural activity of one or more units. +* `data`: a `TsGroup` (or single `Ts`) or TsdFrame (or single `Tsd`) containing the neural activity of one or more units. * `features`: a `Tsd` or `TsdFrame` containing one or more features. By default, 10 bins are used for all features, but you can specify the number of bins, diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 168d4127d..c3f556683 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -96,7 +96,7 @@ def compute_tuning_curves( Parameters ---------- - data : TsGroup, TsdFrame or dict of Ts, Tsd + data : TsGroup, TsdFrame, Ts, Tsd The data for which the tuning curves will be computed. features : Tsd, TsdFrame The features (i.e. one column per feature). 
@@ -238,30 +238,18 @@ def compute_tuning_curves( """ # check data - if isinstance(data, dict): - data = nap.TsGroup(data) - if isinstance(data, nap.Tsd): - data = nap.TsdFrame( - d=data.values, - t=data.times(), - time_support=data.time_support, - ) - elif not isinstance(data, (nap.TsGroup, nap.TsdFrame)): - raise TypeError("data should be a Tsd, TsdFrame, TsGroup, or dict.") + if not isinstance(data, (nap.TsdFrame, nap.TsGroup, nap.Ts, nap.Tsd)): + raise TypeError("data should be a TsdFrame, TsGroup, Ts, or Tsd.") # check features - if isinstance(features, nap.Tsd): - features = nap.TsdFrame( - d=features.values, - t=features.times(), - time_support=features.time_support, - ) - elif not isinstance(features, nap.TsdFrame): + if not isinstance(features, (nap.TsdFrame, nap.Tsd)): raise TypeError("features should be a Tsd or TsdFrame.") # check feature names if feature_names is None: - feature_names = features.columns + feature_names = ( + features.columns if isinstance(features, nap.TsdFrame) else ["0"] + ) else: if ( not hasattr(feature_names, "__len__") @@ -304,10 +292,16 @@ def compute_tuning_curves( occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) # tunning curves - keys = data.keys() if isinstance(data, nap.TsGroup) else data.columns + keys = ( + data.keys() + if isinstance(data, nap.TsGroup) + else data.columns if isinstance(data, nap.TsdFrame) else [0] + ) tcs = np.zeros([len(keys), *occupancy.shape]) - if isinstance(data, nap.TsGroup): + if isinstance(data, (nap.TsGroup, nap.Ts)): # SPIKES + if isinstance(data, nap.Ts): + data = {0: data} for i, n in enumerate(keys): tcs[i] = np.histogramdd( data[n].value_from(features, epochs), @@ -318,12 +312,14 @@ def compute_tuning_curves( else: # RATES values = data.value_from(features, epochs) + if isinstance(data, nap.Tsd): + data = np.expand_dims(data.values, -2) counts = np.histogramdd(values, bins=bin_edges)[0] counts[counts == 0] = np.nan for i, n in enumerate(keys): tcs[i] = np.histogramdd( 
values, - weights=data.values[:, i], + weights=data[:, i], bins=bin_edges, )[0] tcs /= counts diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 4cecbd99d..9b4d223c1 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -29,13 +29,13 @@ def get_features_n(n, fs=10.0): @pytest.mark.parametrize( "data, features, kwargs, expectation", [ - # group + # data ( [1], get_features_n(1), {}, pytest.raises( - TypeError, match="data should be a Tsd, TsdFrame, TsGroup, or dict." + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." ), ), ( @@ -43,7 +43,15 @@ def get_features_n(n, fs=10.0): get_features_n(1), {}, pytest.raises( - TypeError, match="data should be a Tsd, TsdFrame, TsGroup, or dict." + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." + ), + ), + ( + {1: nap.Ts([1, 2, 3])}, + get_features_n(1), + {}, + pytest.raises( + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." ), ), (get_group_n(1), get_features_n(1), {}, does_not_raise()), @@ -51,13 +59,7 @@ def get_features_n(n, fs=10.0): (get_group_n(1).count(0.1), get_features_n(1), {}, does_not_raise()), (get_group_n(3).count(0.1), get_features_n(1), {}, does_not_raise()), (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), get_features_n(1), {}, does_not_raise()), - ({1: nap.Ts([1, 2, 3])}, get_features_n(1), {}, does_not_raise()), - ( - {1: nap.Ts([1, 2, 3]), 2: nap.Ts([1, 2, 3])}, - get_features_n(1), - {}, - does_not_raise(), - ), + (nap.Ts([1, 2, 3]), get_features_n(1), {}, does_not_raise()), # features ( get_group_n(1), From 2b9ba0436a73027d87b46cf29c42cd8fe15807ab Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 18:37:49 +0000 Subject: [PATCH 089/244] typo... 
--- pynapple/process/tuning_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index c3f556683..c188bcb71 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -313,7 +313,7 @@ def compute_tuning_curves( # RATES values = data.value_from(features, epochs) if isinstance(data, nap.Tsd): - data = np.expand_dims(data.values, -2) + data = np.expand_dims(data.values, -1) counts = np.histogramdd(values, bins=bin_edges)[0] counts[counts == 0] = np.nan for i, n in enumerate(keys): From bcfaf16eb078d46834342c93bdd57a6391c8cf09 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 18:44:22 +0000 Subject: [PATCH 090/244] correct range check --- pynapple/process/tuning_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index c188bcb71..22b0e3909 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -277,7 +277,7 @@ def compute_tuning_curves( # check range if range is not None and isinstance(range, tuple): - if features.shape[1] == 1: + if features.ndim == 1 or features.shape[1] == 1: range = [range] else: raise ValueError( From 95b9715be2b4aefc8662dc7b7b209dcba59bc982 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 19:07:02 +0000 Subject: [PATCH 091/244] check fix --- pynapple/process/tuning_curves.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 22b0e3909..e125cb5ce 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -257,7 +257,9 @@ def compute_tuning_curves( or not all(isinstance(n, str) for n in feature_names) ): raise TypeError("feature_names should be a list of strings.") - if len(feature_names) != features.shape[1]: + if len(feature_names) != ( + 1 if 
isinstance(features, nap.Tsd) else features.shape[-1] + ): raise ValueError("feature_names should match the number of features.") # check epochs From a4c0425eae10ace83bb1dd64b228f92d4c29034f Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 25 Jul 2025 19:25:29 +0000 Subject: [PATCH 092/244] change input types to only TsGroup or TsdFrame --- doc/user_guide/07_decoding.md | 2 +- pynapple/process/decoding.py | 50 +++++++++++++---------------------- tests/test_decoding.py | 16 +++++------ 3 files changed, 26 insertions(+), 42 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index e6e9fe860..490d7c397 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -26,7 +26,7 @@ sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_par Input to the decoding functions always includes: - `tuning_curves`, computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). - - `data`, neural activity as a `TsGroup` (spikes), `TsdFrame` (e.g. smoothed rates), or dict of `Ts`/`Tsd`. + - `data`, neural activity as a `TsGroup` (spikes) or `TsdFrame` (smoothed counts). - `epochs`, to restrict decoding to certain intervals. - `bin_size`, the size of the bins in which to count the data (spikes only). - `time_units`, the units of `bin_size`, defaulting to seconds. diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 551eb3a3e..99ea0f589 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -27,9 +27,9 @@ def decode_bayes( ---------- tuning_curves : xr.DataArray Tuning curves as outputed by `compute_tuning_curves` (one for each unit). - data : TsGroup, TsdFrame or dict of Ts, Tsd + data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. - You may also pass a TsdFrame with smoothed rates (recommended). + You may also pass a TsdFrame with smoothed counts (recommended). 
epochs : IntervalSet The epochs on which decoding is computed bin_size : float @@ -197,31 +197,17 @@ def decode_bayes( ) # check data - if isinstance(data, (dict, nap.TsGroup)): - numcells = len(data) - - if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and data.") - - if not np.all(tuning_curves.coords["unit"] == np.array(list(data.keys()))): - raise ValueError("Different indices for tuning curves and data keys.") - - if isinstance(data, dict): - data = nap.TsGroup(data, time_support=epochs) - count = data.count(bin_size, epochs, time_units) - elif isinstance(data, nap.TsdFrame): - numcells = data.shape[1] - - if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and data.") - - if not np.all(tuning_curves.coords["unit"] == data.columns): - raise ValueError("Different indices for tuning curves and data keys.") - - count = data - else: + if isinstance(data, nap.TsGroup): + data = data.count(bin_size, epochs, time_units) + elif not isinstance(data, nap.TsdFrame): raise TypeError("Unknown format for data.") + # check match + if tuning_curves.sizes["unit"] != data.shape[1]: + raise ValueError("Different shapes for tuning_curves and data.") + if not np.all(tuning_curves.coords["unit"] == data.columns.values): + raise ValueError("Different indices for tuning curves and data keys.") + if uniform_prior: occupancy = np.ones_like(tuning_curves[0]).flatten() else: @@ -233,7 +219,7 @@ def decode_bayes( # Transforming to pure numpy array tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T - ct = count.values + ct = data.values bin_size_s = nap.TsIndex.format_timestamps( np.array([bin_size], dtype=np.float64), time_units )[0] @@ -253,13 +239,13 @@ def decode_bayes( p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) if p.ndim > 2: p = nap.TsdTensor( - t=count.index, + t=data.index, d=p, time_support=epochs, ) else: p = nap.TsdFrame( - t=count.index, + t=data.index, 
d=p, time_support=epochs, columns=tuning_curves.coords[tuning_curves.dims[1]].values, @@ -269,13 +255,13 @@ def decode_bayes( if tuning_curves.ndim == 2: decoded = nap.Tsd( - t=count.index, + t=data.index, d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, time_support=epochs, ) else: decoded = nap.TsdFrame( - t=count.index, + t=data.index, d=np.stack( [ tuning_curves.coords[dim][idxmax[i]] @@ -320,7 +306,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): }, attrs={"occupancy": occupancy}, ), - group, + nap.TsGroup(group) if isinstance(group, dict) else group, ep, bin_size, time_units=time_units, @@ -361,7 +347,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N coords={"unit": indexes, "0": xy[0], "1": xy[1]}, attrs={"occupancy": occupancy}, ), - group, + nap.TsGroup(group) if isinstance(group, dict) else group, ep, bin_size, time_units=time_units, diff --git a/tests/test_decoding.py b/tests/test_decoding.py index ce94417f8..2876aa1c4 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -18,13 +18,15 @@ def get_testing_set_n(n_features=1, binned=False): features = nap.TsdFrame(t=times, d=feature_data) epochs = nap.IntervalSet(start=0, end=len(times)) - data = { - i: nap.Ts(t=times[np.all(feature_data == combo, axis=1)]) - for i, combo in enumerate(combos) - } + data = nap.TsGroup( + { + i: nap.Ts(t=times[np.all(feature_data == combo, axis=1)]) + for i, combo in enumerate(combos) + } + ) if binned: - frame = nap.TsGroup(data).count(bin_size=1, ep=epochs) + frame = data.count(bin_size=1, ep=epochs) data = nap.TsdFrame( frame.times() - 0.5, frame.values, @@ -142,10 +144,6 @@ def get_testing_set_n(n_features=1, binned=False): match="Different indices for tuning curves and data keys.", ), ), - ( - {"data": nap.TsGroup(get_testing_set_n()["data"])}, - does_not_raise(), - ), ( {"data": get_testing_set_n(binned=True)["data"]}, does_not_raise(), From 
308c448bba72ecb69532eeec61960de74a0d3bd9 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 29 Jul 2025 15:16:34 +0000 Subject: [PATCH 093/244] init --- pynapple/process/decoding.py | 287 +++++++++++++++++++++++++++++- pynapple/process/tuning_curves.py | 1 - 2 files changed, 286 insertions(+), 2 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 69f4d4813..a46dd63a5 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -1,5 +1,5 @@ """ -Decoding functions for timestamps data (spike times). The first argument is always a tuning curves object. +Decoding functions. """ import warnings @@ -290,6 +290,291 @@ def decode_bayes( return decoded, p +def decode_template( + tuning_curves, group, epochs, bin_size, time_units="s", uniform_prior=True +): + """ + Performs Bayesian decoding over n-dimensional features. + + See: + Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. + (1998). Interpreting neuronal population activity by + reconstruction: unified framework with application to + hippocampal place cells. Journal of neurophysiology, 79(2), + 1017-1044. + + Parameters + ---------- + tuning_curves : xr.DataArray + Tuning curves as outputed by `compute_tuning_curves` (one for each unit). + group : TsGroup, TsdFrame or dict of Ts, Tsd + A group of neurons with the same keys as the tuning curves. + You may also pass a TsdFrame with smoothed rates (recommended). + epochs : IntervalSet + The epochs on which decoding is computed + bin_size : float + Bin size. Default is second. Use the parameter time_units to change it. + time_units : str, optional + Time unit of the bin size ('s' [default], 'ms', 'us'). + uniform_prior : bool, optional + If True (default), uses a uniform distribution as a prior. + If False, uses the occupancy from the tuning curves as a prior over the feature + probability distribution. 
+ + Returns + ------- + Tsd + The decoded feature + TsdFrame, TsdTensor + The probability distribution of the decoded trajectory for each time bin + + + Examples + -------- + In the simplest case, we can decode a single feature (e.g., position) from a group of neurons: + + >>> import pynapple as nap + >>> import numpy as np + >>> group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) + >>> feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) + >>> tuning_curves = nap.compute_tuning_curves(group, feature, bins=2, range=(-.5, 1.5)) + >>> epochs = nap.IntervalSet([0, 100]) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) + ---------- -- + 0.5 0 + 1.5 0 + 2.5 0 + 3.5 0 + 4.5 0 + 5.5 0 + 6.5 0 + ... + 93.5 1 + 94.5 1 + 95.5 1 + 96.5 1 + 97.5 1 + 98.5 1 + 99.5 1 + dtype: float64, shape: (100,) + + decode is a `Tsd` object containing the decoded feature for each time bin. + + >>> p + Time (s) 0 1 + ---------- --- --- + 0.5 1.0 0.0 + 1.5 1.0 0.0 + 2.5 1.0 0.0 + 3.5 1.0 0.0 + 4.5 1.0 0.0 + 5.5 1.0 0.0 + 6.5 1.0 0.0 + ... ... ... + 93.5 0.0 1.0 + 94.5 0.0 1.0 + 95.5 0.0 1.0 + 96.5 0.0 1.0 + 97.5 0.0 1.0 + 98.5 0.0 1.0 + 99.5 0.0 1.0 + dtype: float64, shape: (100, 2) + + p is a `TsdFrame` object containing the probability distribution for each time bin. + + The function also works for multiple features, in which case it does n-dimensional decoding: + + >>> features = nap.TsdFrame( + ... t=np.arange(0, 100, 1), + ... d=np.vstack((np.repeat(np.arange(0, 2), 50), np.tile(np.arange(0, 2), 50))).T, + ... ) + >>> group = nap.TsGroup( + ... { + ... 0: nap.Ts(np.arange(0, 50, 2)), + ... 1: nap.Ts(np.arange(1, 51, 2)), + ... 2: nap.Ts(np.arange(50, 100, 2)), + ... 3: nap.Ts(np.arange(51, 101, 2)), + ... } + ... 
) + >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) 0 1 + ---------- --- --- + 0.5 0.0 0.0 + 1.5 0.0 1.0 + 2.5 0.0 0.0 + 3.5 0.0 1.0 + 4.5 0.0 0.0 + 5.5 0.0 1.0 + 6.5 0.0 0.0 + ... ... ... + 93.5 1.0 1.0 + 94.5 1.0 0.0 + 95.5 1.0 1.0 + 96.5 1.0 0.0 + 97.5 1.0 1.0 + 98.5 1.0 0.0 + 99.5 1.0 1.0 + dtype: float64, shape: (100, 2) + + decoded is now a `TsdFrame` object containing the decoded features for each time bin. + + >>> p + Time (s) + ---------- -------------- + 0.5 [[1., 0.] ...] + 1.5 [[0., 1.] ...] + 2.5 [[1., 0.] ...] + 3.5 [[0., 1.] ...] + 4.5 [[1., 0.] ...] + 5.5 [[0., 1.] ...] + 6.5 [[1., 0.] ...] + ... + 93.5 [[0., 0.] ...] + 94.5 [[0., 0.] ...] + 95.5 [[0., 0.] ...] + 96.5 [[0., 0.] ...] + 97.5 [[0., 0.] ...] + 98.5 [[0., 0.] ...] + 99.5 [[0., 0.] ...] + dtype: float64, shape: (100, 2, 2) + + and p is a `TsdTensor` object containing the probability distribution for each time bin. + + It is also possible to pass continuous values instead of spikes (e.g. smoothed spike counts): + + >>> group = group.count(1).smooth(2) + >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) + >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded + Time (s) 0 1 + ---------- --- --- + 0.5 0.0 1.0 + 1.5 0.0 1.0 + 2.5 0.0 1.0 + 3.5 0.0 1.0 + 4.5 0.0 0.0 + 5.5 0.0 0.0 + 6.5 0.0 0.0 + ... ... ... + 92.5 1.0 0.0 + 93.5 1.0 0.0 + 94.5 1.0 0.0 + 95.5 1.0 1.0 + 96.5 1.0 1.0 + 97.5 1.0 1.0 + 98.5 1.0 1.0 + dtype: float64, shape: (98, 2) + """ + + # check tuning curves + if not isinstance(tuning_curves, xr.DataArray): + raise TypeError( + "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." 
+ ) + + # check group + if isinstance(group, (dict, nap.TsGroup)): + numcells = len(group) + + if tuning_curves.sizes["unit"] != numcells: + raise ValueError("Different shapes for tuning_curves and group.") + + if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): + raise ValueError("Different indices for tuning curves and group keys.") + + if isinstance(group, dict): + group = nap.TsGroup(group, time_support=epochs) + count = group.count(bin_size, epochs, time_units) + elif isinstance(group, nap.TsdFrame): + numcells = group.shape[1] + + if tuning_curves.sizes["unit"] != numcells: + raise ValueError("Different shapes for tuning_curves and group.") + + if not np.all(tuning_curves.coords["unit"] == group.columns): + raise ValueError("Different indices for tuning curves and group keys.") + + count = group + else: + raise TypeError("Unknown format for group.") + + if uniform_prior: + occupancy = np.ones_like(tuning_curves[0]).flatten() + else: + if "occupancy" not in tuning_curves.attrs: + raise ValueError( + "uniform_prior set to False but no occupancy found in tuning curves." 
+ ) + occupancy = tuning_curves.attrs["occupancy"].flatten() + + # Transforming to pure numpy array + tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T + ct = count.values + bin_size_s = nap.TsIndex.format_timestamps( + np.array([bin_size], dtype=np.float64), time_units + )[0] + + p1 = np.exp(-bin_size_s * np.nansum(tc, 1)) + p2 = occupancy / occupancy.sum() + + ct2 = np.tile(ct[:, np.newaxis, :], (1, tc.shape[0], 1)) + + p3 = np.nanprod(tc**ct2, -1) + + p = p1 * p2 * p3 + p = p / p.sum(1)[:, np.newaxis] + + idxmax = np.argmax(p, 1) + + p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) + if p.ndim > 2: + p = nap.TsdTensor( + t=count.index, + d=p, + time_support=epochs, + ) + else: + p = nap.TsdFrame( + t=count.index, + d=p, + time_support=epochs, + columns=tuning_curves.coords[tuning_curves.dims[1]].values, + ) + + idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) + + if tuning_curves.ndim == 2: + decoded = nap.Tsd( + t=count.index, + d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, + time_support=epochs, + ) + else: + decoded = nap.TsdFrame( + t=count.index, + d=np.stack( + [ + tuning_curves.coords[dim][idxmax[i]] + for i, dim in enumerate(tuning_curves.dims[1:]) + ], + axis=1, + ), + time_support=epochs, + columns=tuning_curves.dims[1:], + ) + + return decoded, p + + +# ------------------------------------------------------------------------------------- +# Deprecated functions for backward compatibility +# ------------------------------------------------------------------------------------- + + def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): warnings.warn( "decode_1d is deprecated and will be removed in a future version; use decode instead.", diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index e2eb01d20..7e35dd0dd 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -1,6 +1,5 @@ """ Functions to compute tuning 
curves for features in 1 dimension or 2 dimension. - """ import inspect From 2f83ef3c04bda8976266e1914322113a8b2d779f Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 29 Jul 2025 16:15:10 +0000 Subject: [PATCH 094/244] first version of template decoding --- pynapple/process/__init__.py | 2 +- pynapple/process/decoding.py | 298 ++++++++++++++--------------------- 2 files changed, 122 insertions(+), 178 deletions(-) diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 1fde96ca3..cba778160 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -4,7 +4,7 @@ compute_eventcorrelogram, compute_isi_distribution, ) -from .decoding import decode_1d, decode_2d, decode_bayes +from .decoding import decode_1d, decode_2d, decode_bayes, decode_template from .filtering import ( apply_bandpass_filter, apply_bandstop_filter, diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 23cded6b5..c95b53b1a 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -1,15 +1,116 @@ """ -Decoding functions. +Functions to decode n-dimensional features. """ +import inspect import warnings +from functools import wraps import numpy as np import xarray as xr +from scipy.spatial.distance import cdist from .. import core as nap +def _validate_decoding_inputs(func): + @wraps(func) + def wrapper(*args, **kwargs): + # Validate each positional argument + sig = inspect.signature(func) + kwargs = sig.bind_partial(*args, **kwargs).arguments + + # check tuning curves + tuning_curves = kwargs["tuning_curves"] + if not isinstance(tuning_curves, xr.DataArray): + raise TypeError( + "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." 
+ ) + + # check data + data = kwargs["data"] + if isinstance(data, nap.TsGroup): + data = data.count( + kwargs["bin_size"], + kwargs.get("epochs", None), + kwargs.get("time_units", "s"), + ) + elif not isinstance(data, nap.TsdFrame): + raise TypeError("Unknown format for data.") + kwargs["data"] = data + + # check match + if tuning_curves.sizes["unit"] != data.shape[1]: + raise ValueError("Different shapes for tuning_curves and data.") + if not np.all(tuning_curves.coords["unit"] == data.columns.values): + raise ValueError("Different indices for tuning curves and data keys.") + + if ( + "uniform_prior" in kwargs + and not kwargs["uniform_prior"] + and "occupancy" not in tuning_curves.attrs + ): + raise ValueError( + "uniform_prior set to False but no occupancy found in tuning curves." + ) + + # Call the original function with validated inputs + return func(**kwargs) + + return wrapper + + +def _format_decoding_outputs(func): + @wraps(func) + def wrapper(tuning_curves, data, epochs, *args, **kwargs): + p = func(tuning_curves, data, epochs, *args, **kwargs) + idxmax = np.argmax(p, 1) + + # Fromat probability distribution + p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) + if p.ndim > 2: + p = nap.TsdTensor( + t=data.index, + d=p, + time_support=epochs, + ) + else: + p = nap.TsdFrame( + t=data.index, + d=p, + time_support=epochs, + columns=tuning_curves.coords[tuning_curves.dims[1]].values, + ) + + # Format decoded + idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) + if tuning_curves.ndim == 2: + decoded = nap.Tsd( + t=data.index, + d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, + time_support=epochs, + ) + else: + decoded = nap.TsdFrame( + t=data.index, + d=np.stack( + [ + tuning_curves.coords[dim][idxmax[i]] + for i, dim in enumerate(tuning_curves.dims[1:]) + ], + axis=1, + ), + time_support=epochs, + columns=tuning_curves.dims[1:], + ) + + return decoded, p + + return wrapper + + +@_validate_decoding_inputs +@_format_decoding_outputs 
def decode_bayes( tuning_curves, data, epochs, bin_size, time_units="s", uniform_prior=True ): @@ -189,35 +290,12 @@ def decode_bayes( 98.5 1.0 1.0 dtype: float64, shape: (98, 2) """ + occupancy = ( + np.ones_like(tuning_curves[0]).flatten() + if uniform_prior + else tuning_curves.attrs["occupancy"].flatten() + ) - # check tuning curves - if not isinstance(tuning_curves, xr.DataArray): - raise TypeError( - "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." - ) - - # check data - if isinstance(data, nap.TsGroup): - data = data.count(bin_size, epochs, time_units) - elif not isinstance(data, nap.TsdFrame): - raise TypeError("Unknown format for data.") - - # check match - if tuning_curves.sizes["unit"] != data.shape[1]: - raise ValueError("Different shapes for tuning_curves and data.") - if not np.all(tuning_curves.coords["unit"] == data.columns.values): - raise ValueError("Different indices for tuning curves and data keys.") - - if uniform_prior: - occupancy = np.ones_like(tuning_curves[0]).flatten() - else: - if "occupancy" not in tuning_curves.attrs: - raise ValueError( - "uniform_prior set to False but no occupancy found in tuning curves." 
- ) - occupancy = tuning_curves.attrs["occupancy"].flatten() - - # Transforming to pure numpy array tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = data.values bin_size_s = nap.TsIndex.format_timestamps( @@ -232,55 +310,16 @@ def decode_bayes( p3 = np.nanprod(tc**ct2, -1) p = p1 * p2 * p3 - p = p / p.sum(1)[:, np.newaxis] - - idxmax = np.argmax(p, 1) - - p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) - if p.ndim > 2: - p = nap.TsdTensor( - t=data.index, - d=p, - time_support=epochs, - ) - else: - p = nap.TsdFrame( - t=data.index, - d=p, - time_support=epochs, - columns=tuning_curves.coords[tuning_curves.dims[1]].values, - ) - - idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) - - if tuning_curves.ndim == 2: - decoded = nap.Tsd( - t=data.index, - d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, - time_support=epochs, - ) - else: - decoded = nap.TsdFrame( - t=data.index, - d=np.stack( - [ - tuning_curves.coords[dim][idxmax[i]] - for i, dim in enumerate(tuning_curves.dims[1:]) - ], - axis=1, - ), - time_support=epochs, - columns=tuning_curves.dims[1:], - ) - - return decoded, p + return p / p.sum(1)[:, np.newaxis] +@_validate_decoding_inputs +@_format_decoding_outputs def decode_template( - tuning_curves, group, epochs, bin_size, time_units="s", uniform_prior=True + tuning_curves, data, epochs, bin_size, metric="correlation", time_units="s" ): """ - Performs Bayesian decoding over n-dimensional features. + Performs template matching decoding over n-dimensional features. See: Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. @@ -293,19 +332,17 @@ def decode_template( ---------- tuning_curves : xr.DataArray Tuning curves as outputed by `compute_tuning_curves` (one for each unit). - group : TsGroup, TsdFrame or dict of Ts, Tsd - A group of neurons with the same keys as the tuning curves. - You may also pass a TsdFrame with smoothed rates (recommended). 
+ data : TsGroup or TsdFrame + Neural activity with the same keys as the tuning curves. + You may also pass a TsdFrame with smoothed counts (recommended). epochs : IntervalSet The epochs on which decoding is computed bin_size : float Bin size. Default is second. Use the parameter time_units to change it. + metric : str, optional + The distance metric to use for template matching. Default is 'correlation'. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). - uniform_prior : bool, optional - If True (default), uses a uniform distribution as a prior. - If False, uses the occupancy from the tuning curves as a prior over the feature - probability distribution. Returns ------- @@ -455,105 +492,12 @@ def decode_template( 98.5 1.0 1.0 dtype: float64, shape: (98, 2) """ - - # check tuning curves - if not isinstance(tuning_curves, xr.DataArray): - raise TypeError( - "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." - ) - - # check group - if isinstance(group, (dict, nap.TsGroup)): - numcells = len(group) - - if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and group.") - - if not np.all(tuning_curves.coords["unit"] == np.array(list(group.keys()))): - raise ValueError("Different indices for tuning curves and group keys.") - - if isinstance(group, dict): - group = nap.TsGroup(group, time_support=epochs) - count = group.count(bin_size, epochs, time_units) - elif isinstance(group, nap.TsdFrame): - numcells = group.shape[1] - - if tuning_curves.sizes["unit"] != numcells: - raise ValueError("Different shapes for tuning_curves and group.") - - if not np.all(tuning_curves.coords["unit"] == group.columns): - raise ValueError("Different indices for tuning curves and group keys.") - - count = group - else: - raise TypeError("Unknown format for group.") - - if uniform_prior: - occupancy = np.ones_like(tuning_curves[0]).flatten() - else: - if "occupancy" not in 
tuning_curves.attrs: - raise ValueError( - "uniform_prior set to False but no occupancy found in tuning curves." - ) - occupancy = tuning_curves.attrs["occupancy"].flatten() - - # Transforming to pure numpy array tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T - ct = count.values - bin_size_s = nap.TsIndex.format_timestamps( - np.array([bin_size], dtype=np.float64), time_units - )[0] - - p1 = np.exp(-bin_size_s * np.nansum(tc, 1)) - p2 = occupancy / occupancy.sum() - - ct2 = np.tile(ct[:, np.newaxis, :], (1, tc.shape[0], 1)) - - p3 = np.nanprod(tc**ct2, -1) - - p = p1 * p2 * p3 - p = p / p.sum(1)[:, np.newaxis] - - idxmax = np.argmax(p, 1) - - p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) - if p.ndim > 2: - p = nap.TsdTensor( - t=count.index, - d=p, - time_support=epochs, - ) - else: - p = nap.TsdFrame( - t=count.index, - d=p, - time_support=epochs, - columns=tuning_curves.coords[tuning_curves.dims[1]].values, - ) - - idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) - - if tuning_curves.ndim == 2: - decoded = nap.Tsd( - t=count.index, - d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, - time_support=epochs, - ) - else: - decoded = nap.TsdFrame( - t=count.index, - d=np.stack( - [ - tuning_curves.coords[dim][idxmax[i]] - for i, dim in enumerate(tuning_curves.dims[1:]) - ], - axis=1, - ), - time_support=epochs, - columns=tuning_curves.dims[1:], - ) + ct = data.values - return decoded, p + dist = cdist(ct, tc, metric=metric) + sim = 1 / (dist + 1e-12) + return sim / sim.sum(axis=1, keepdims=True) # ------------------------------------------------------------------------------------- From e7ecc0da08a96a67a988581cf5a43c9be7309079 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 29 Jul 2025 17:05:50 +0000 Subject: [PATCH 095/244] tests --- tests/test_decoding.py | 48 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/tests/test_decoding.py b/tests/test_decoding.py 
index 2876aa1c4..4ae4459c3 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -156,6 +156,21 @@ def get_testing_set_n(n_features=1, binned=False): get_testing_set_n(3, binned=True), does_not_raise(), ), + ], +) +def test_decode_input_errors(overwrite_default_args, expectation): + default_args = get_testing_set_n() + default_args.update(overwrite_default_args) + default_args.pop("features") + with expectation: + nap.decode_bayes(**default_args) + nap.decode_template(**default_args) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "overwrite_default_args, expectation", + [ # uniform_prior ( { @@ -175,7 +190,7 @@ def get_testing_set_n(n_features=1, binned=False): ), ], ) -def test_decode_bayes_type_errors(overwrite_default_args, expectation): +def test_decode_bayes_input_errors(overwrite_default_args, expectation): default_args = get_testing_set_n() default_args.update(overwrite_default_args) default_args.pop("features") @@ -214,6 +229,37 @@ def test_decode_bayes(n_features, binned, uniform_prior): np.testing.assert_array_almost_equal(proba.values, expected_proba) +@pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) +@pytest.mark.parametrize("n_features", [1, 2, 3]) +@pytest.mark.parametrize("binned", [True, False]) +def test_decode_template(n_features, binned, metric): + features, tuning_curves, data, epochs, bin_size = get_testing_set_n( + n_features, binned=binned + ).values() + decoded, proba = nap.decode_template( + tuning_curves=tuning_curves, + data=data, + epochs=epochs, + metric=metric, + bin_size=bin_size, + time_units="s", + ) + + assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) + np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) + + assert isinstance( + proba, + nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, + ) + expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) + target_indices = 
[np.arange(len(features))] + [ + features[:, d] for d in range(features.shape[1]) + ] + expected_proba[tuple(target_indices)] = 1.0 + np.testing.assert_array_almost_equal(proba.values, expected_proba) + + # ------------------------------------------------------------------------------------ # OLD DECODING TESTS # ------------------------------------------------------------------------------------ From 12c04849331364cf5e7aca6821fc6bfcbd76647f Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Wed, 30 Jul 2025 12:48:51 -0400 Subject: [PATCH 096/244] add restrict_info method for metadata --- .gitignore | 2 + doc/user_guide/03_metadata.md | 12 ++++++ pynapple/core/interval_set.py | 39 +++++++++++++++++++ pynapple/core/metadata_class.py | 33 ++++++++++++++++ pynapple/core/time_series.py | 59 ++++++++++++++++++++++++++++ pynapple/core/ts_group.py | 46 ++++++++++++++++++++++ tests/test_metadata.py | 69 +++++++++++++++++++++++++++++++++ 7 files changed, 260 insertions(+) diff --git a/.gitignore b/.gitignore index 66781c0f5..081e5c05d 100644 --- a/.gitignore +++ b/.gitignore @@ -165,3 +165,5 @@ your # Ignore npz files from testing: tests/*.npz .vscode/settings.json +doc/user_guide/MyProject/sub-A2929/A2929-200711/stimulus-fish.json +doc/user_guide/memmap.dat diff --git a/doc/user_guide/03_metadata.md b/doc/user_guide/03_metadata.md index 6c1116ce9..abee2b3a4 100644 --- a/doc/user_guide/03_metadata.md +++ b/doc/user_guide/03_metadata.md @@ -198,6 +198,18 @@ tsgroup.drop_info("coords") print(tsgroup) ``` +## Restricting metadata +Instead of dropping multiple metadata fields, you may want to restrict to a set of specified fields, i.e. select which columns to keep. For this operation, use the [`restrict_info()`](pynapple.TsGroup.restrict_info) method. Multiple metadata columns can be kept by passing a list of metadata names. 
+```{code-cell} ipython3 +import copy +tsgroup2 = copy.deepcopy(tsgroup) +tsgroup2.restrict_info("region") +print(tsgroup2) +``` +```{admonition} Note +The `rate` column will always be kept for a `TsGroup`. +``` + ## Using metadata to slice objects Metadata can be used to slice or filter objects based on metadata values. ```{code-cell} ipython3 diff --git a/pynapple/core/interval_set.py b/pynapple/core/interval_set.py index 59e51b835..635ee0032 100644 --- a/pynapple/core/interval_set.py +++ b/pynapple/core/interval_set.py @@ -1198,6 +1198,45 @@ def drop_info(self, key): """ return _MetadataMixin.drop_info(self, key) + @add_meta_docstring("restrict_info") + def restrict_info(self, key): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> times = np.array([[0, 5], [10, 12], [20, 33]]) + >>> metadata = {"l1": [1, 2, 3], "l2": ["x", "x", "y"], "l3": [4, 5, 6]} + >>> ep = nap.IntervalSet(tmp,metadata=metadata) + >>> ep + index start end l1 l2 l3 + 0 0 5 1 x 4 + 1 10 12 2 x 5 + 2 20 33 3 y 6 + shape: (3, 2), time unit: sec. + + To restrict to multiple metadata columns: + + >>> ep.restrict_info(["l2", "l3"]) + >>> ep + index start end l2 l3 + 0 0 5 x 4 + 1 10 12 x 5 + 2 20 33 y 6 + shape: (3, 2), time unit: sec. + + To restrict to a single metadata column: + + >>> ep.restrict_info("l2") + >>> ep + index start end l2 + 0 0 5 x + 1 10 12 x + 2 20 33 y + shape: (3, 2), time unit: sec. + """ + return _MetadataMixin.restrict_info(self, key) + @add_or_convert_metadata @add_meta_docstring("groupby") def groupby(self, by, get_group=None): diff --git a/pynapple/core/metadata_class.py b/pynapple/core/metadata_class.py index e00413d13..05309d14e 100644 --- a/pynapple/core/metadata_class.py +++ b/pynapple/core/metadata_class.py @@ -407,6 +407,39 @@ def drop_info(self, key): f"Invalid metadata column {key}. Metadata columns are {self.metadata_columns}" ) + def restrict_info(self, key): + """ + Restrict metadata columns to a key or list of keys. 
+ + Parameters + ---------- + key : str or list of str + Metadata column name(s) to restrict to. + + Returns + ------- + None + """ + if isinstance(key, Number): + raise TypeError( + f"Invalid metadata column {key}. Metadata columns are {self.metadata_columns}" + ) + if isinstance(key, str): + key = [key] + + no_keep = [k for k in key if k not in self.metadata_columns] + if no_keep: + raise KeyError( + f"Metadata column(s) {no_keep} not found. Metadata columns are {self.metadata_columns}" + ) + + drop_keys = set(self.metadata_columns) - set(key) + for k in drop_keys: + if (self.nap_class == "TsGroup") and (k == "rate"): + continue # cannot drop TsGroup 'rate' + else: + del self._metadata[k] + def groupby(self, by, get_group=None): """ Group pynapple object by metadata name(s). diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 071276f5a..357b0644a 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -2371,6 +2371,65 @@ def drop_info(self, key): """ return _MetadataMixin.drop_info(self, key) + @add_meta_docstring("restrict_info") + def restrict_info(self, key): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> metadata = {"l1": [1, 2, 3], "l2": ["x", "x", "y"], "l3": [4, 5, 6]} + >>> tsdframe = nap.TsdFrame(t=np.arange(5), d=np.ones((5, 3)), metadata=metadata) + >>> print(tsdframe) + Time (s) 0 1 2 + ---------- --- --- --- + 0.0 1.0 1.0 1.0 + 1.0 1.0 1.0 1.0 + 2.0 1.0 1.0 1.0 + 3.0 1.0 1.0 1.0 + 4.0 1.0 1.0 1.0 + Metadata + ---------- --- --- --- + l1 1 2 3 + l2 x x y + l3 4 5 6 + dtype: float64, shape: (5, 3) + + To restrict to multiple metadata rows: + + >>> tsdframe.restrict_info(["l2", "l3"]) + >>> tsdframe + Time (s) 0 1 2 + ---------- --- --- --- + 0.0 1.0 1.0 1.0 + 1.0 1.0 1.0 1.0 + 2.0 1.0 1.0 1.0 + 3.0 1.0 1.0 1.0 + 4.0 1.0 1.0 1.0 + Metadata + ---------- --- --- --- + l2 x x y + l3 4 5 6 + dtype: float64, shape: (5, 3) + + To restrict to a single metadata row: + + 
>>> tsdframe.restrict_info("l2") + >>> tsdframe + Time (s) 0 1 2 + ---------- --- --- --- + 0 1 1 1 + 1 1 1 1 + 2 1 1 1 + 3 1 1 1 + 4 1 1 1 + Metadata + ---------- --- --- --- + l2 x x y + dtype: float64, shape: (5, 3) + """ + return _MetadataMixin.restrict_info(self, key) + @add_or_convert_metadata @add_meta_docstring("groupby") def groupby(self, by, get_group=None): diff --git a/pynapple/core/ts_group.py b/pynapple/core/ts_group.py index 4f3a3667b..f1e43103f 100644 --- a/pynapple/core/ts_group.py +++ b/pynapple/core/ts_group.py @@ -1843,6 +1843,52 @@ def drop_info(self, key): """ return _MetadataMixin.drop_info(self, key) + @add_meta_docstring("restrict_info") + def restrict_info(self, key): + """ + Note + ---- + The `rate` column is always kept in the metadata, even if it is not specified in `key`. + + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> tmp = {0:nap.Ts(t=np.arange(0,200), time_units='s'), + ... 1:nap.Ts(t=np.arange(0,200,0.5), time_units='s'), + ... 2:nap.Ts(t=np.arange(0,300,0.25), time_units='s'), + ... 
} + >>> metadata = {"l1": [1, 2, 3], "l2": ["x", "x", "y"], "l3": [4, 5, 6]} + >>> tsgroup = nap.TsGroup(tmp,metadata=metadata) + >>> print(tsgroup) + Index rate l1 l2 l3 + ------- ------- ---- ---- ---- + 0 0.66722 1 x 4 + 1 1.33445 2 x 5 + 2 4.00334 3 y 6 + + To restrict to multiple metadata columns: + + >>> tsgroup.restrict_info(["l2", "l3"]) + >>> tsgroup + Index rate l2 l3 + ------- ------- ---- ---- + 0 0.66722 x 4 + 1 1.33445 x 5 + 2 4.00334 y 6 + + To restrict to a single metadata column: + + >>> tsgroup.drop_info("l2") + >>> tsgroup + Index rate l2 + ------- ------- ---- + 0 0.66722 x + 1 1.33445 x + 2 4.00334 y + """ + return _MetadataMixin.restrict_info(self, key) + @add_or_convert_metadata @add_meta_docstring("groupby") def groupby(self, by, get_group=None): diff --git a/tests/test_metadata.py b/tests/test_metadata.py index b5c95a85c..66d36fe72 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -1464,6 +1464,75 @@ def test_drop_metadata_error(self, obj, obj_len, drop, error): if isinstance(drop, list) and ("label" in drop): assert "label" in obj.metadata_columns + def test_restrict_metadata(self, obj, obj_len): + """ + Test for restricting metadata with restrict_info. 
+ """ + info = np.ones(obj_len) + obj.set_info(l1=info, l2=info * 2, l3=info * 3) + for col in ["l1", "l2", "l3"]: + assert col in obj.metadata_columns + + # restrict to 1 key + obj.restrict_info("l1") + assert "l1" in obj.metadata_columns + for col in ["l2", "l3"]: + assert col not in obj.metadata_columns + + # rate should always be present in TsGroup + if isinstance(obj, nap.TsGroup): + assert "rate" in obj.metadata_columns + + # restrict to multiple keys + obj.set_info(l2=info * 2, l3=info * 3, l4=info * 4) + obj.restrict_info(["l1", "l2"]) + for col in ["l1", "l2"]: + assert col in obj.metadata_columns + for col in ["l3", "l4"]: + assert col not in obj.metadata_columns + + # rate should always be present in TsGroup + if isinstance(obj, nap.TsGroup): + assert "rate" in obj.metadata_columns + + @pytest.mark.parametrize( + "keep, error", + [ + ( + "not_info", + pytest.raises( + KeyError, + match=r"Metadata column\(s\) \['not_info'\] not found", + ), + ), + ( + ["not_info", "not_info2"], + pytest.raises( + KeyError, + match=r"Metadata column\(s\) \['not_info', 'not_info2'\] not found", + ), + ), + ( + ["label", 0], + pytest.raises(KeyError, match=r"Metadata column\(s\) \[0\] not found"), + ), + (0, pytest.raises(TypeError, match="Invalid metadata column")), + ], + ) + def test_restrict_metadata_error(self, obj, obj_len, keep, error): + """ + Test for errors when dropping metadata. 
+ """ + info = np.ones(obj_len) + obj.set_info(label=info, other=info * 2) + + with error: + obj.restrict_info(keep) + + # make sure nothing gets dropped + assert "label" in obj.metadata_columns + assert "other" in obj.metadata_columns + # test naming overlap of shared attributes @pytest.mark.parametrize( "name", From 75a8e947703b20a36e508b8e6eb2473715351dfc Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Wed, 30 Jul 2025 14:07:33 -0400 Subject: [PATCH 097/244] fix test --- tests/test_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_metadata.py b/tests/test_metadata.py index 66d36fe72..35a428120 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -2594,7 +2594,7 @@ def test_no_conflict_between_class_and_metadatamixin(nap_class): conflicting_members = iset_members.intersection(metadatamixin_members) # set_info, get_info, drop_info, groupby, and groupby_apply are overwritten for class-specific examples in docstrings - assert len(conflicting_members) == 5, ( + assert len(conflicting_members) == 6, ( f"Conflict detected! The following methods/attributes are " f"overwritten in IntervalSet: {conflicting_members}" ) From a0a7ba4a60c3440cd3454a411927f671bd30954b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 30 Jul 2025 22:34:53 +0000 Subject: [PATCH 098/244] docstrings --- pynapple/process/decoding.py | 153 ++++++++++++++++++----------------- 1 file changed, 78 insertions(+), 75 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index c95b53b1a..49924be22 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -184,23 +184,23 @@ def decode_bayes( decode is a `Tsd` object containing the decoded feature for each time bin. >>> p - Time (s) 0 1 - ---------- --- --- - 0.5 1.0 0.0 - 1.5 1.0 0.0 - 2.5 1.0 0.0 - 3.5 1.0 0.0 - 4.5 1.0 0.0 - 5.5 1.0 0.0 - 6.5 1.0 0.0 - ... ... ... 
- 93.5 0.0 1.0 - 94.5 0.0 1.0 - 95.5 0.0 1.0 - 96.5 0.0 1.0 - 97.5 0.0 1.0 - 98.5 0.0 1.0 - 99.5 0.0 1.0 + Time (s) 0.0 1.0 + ---------- ----- ----- + 0.5 1.0 0.0 + 1.5 1.0 0.0 + 2.5 1.0 0.0 + 3.5 1.0 0.0 + 4.5 1.0 0.0 + 5.5 1.0 0.0 + 6.5 1.0 0.0 + ... ... ... + 93.5 0.0 1.0 + 94.5 0.0 1.0 + 95.5 0.0 1.0 + 96.5 0.0 1.0 + 97.5 0.0 1.0 + 98.5 0.0 1.0 + 99.5 0.0 1.0 dtype: float64, shape: (100, 2) p is a `TsdFrame` object containing the probability distribution for each time bin. @@ -322,11 +322,9 @@ def decode_template( Performs template matching decoding over n-dimensional features. See: - Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. - (1998). Interpreting neuronal population activity by - reconstruction: unified framework with application to - hippocampal place cells. Journal of neurophysiology, 79(2), - 1017-1044. + Vollan, A. Z., Gardner, R. J., Moser, M. B., & Moser, E. I. (2025). + Left–right-alternating theta sweeps in entorhinal–hippocampal maps of space. + Nature, 639(8004), 995–1005. Parameters ---------- @@ -339,8 +337,15 @@ def decode_template( The epochs on which decoding is computed bin_size : float Bin size. Default is second. Use the parameter time_units to change it. - metric : str, optional - The distance metric to use for template matching. Default is 'correlation'. + metric : str or callable, optional + The distance metric to use for template matching. + This is passed to `scipy.spatial.distance.cdist`, + If a string, the distance function can be ‘braycurtis’, ‘canberra’, + ‘chebyshev’, ‘cityblock’, ‘correlation’, ‘cosine’, ‘dice’, ‘euclidean’, + ‘hamming’, ‘jaccard’, ‘jensenshannon’, ‘kulczynski1’, ‘mahalanobis’, + ‘matching’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, + ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’. + Default is 'correlation'. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). 
@@ -362,7 +367,7 @@ def decode_template( >>> feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) >>> tuning_curves = nap.compute_tuning_curves(group, feature, bins=2, range=(-.5, 1.5)) >>> epochs = nap.IntervalSet([0, 100]) - >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, p = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) ---------- -- @@ -386,23 +391,23 @@ def decode_template( decode is a `Tsd` object containing the decoded feature for each time bin. >>> p - Time (s) 0 1 - ---------- --- --- - 0.5 1.0 0.0 - 1.5 1.0 0.0 - 2.5 1.0 0.0 - 3.5 1.0 0.0 - 4.5 1.0 0.0 - 5.5 1.0 0.0 - 6.5 1.0 0.0 - ... ... ... - 93.5 0.0 1.0 - 94.5 0.0 1.0 - 95.5 0.0 1.0 - 96.5 0.0 1.0 - 97.5 0.0 1.0 - 98.5 0.0 1.0 - 99.5 0.0 1.0 + Time (s) 0.0 1.0 + ---------- ----- ----- + 0.5 1.0 0.0 + 1.5 1.0 0.0 + 2.5 1.0 0.0 + 3.5 1.0 0.0 + 4.5 1.0 0.0 + 5.5 1.0 0.0 + 6.5 1.0 0.0 + ... ... ... + 93.5 0.0 1.0 + 94.5 0.0 1.0 + 95.5 0.0 1.0 + 96.5 0.0 1.0 + 97.5 0.0 1.0 + 98.5 0.0 1.0 + 99.5 0.0 1.0 dtype: float64, shape: (100, 2) p is a `TsdFrame` object containing the probability distribution for each time bin. @@ -422,7 +427,7 @@ def decode_template( ... } ... ) >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) - >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, p = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- @@ -447,50 +452,48 @@ def decode_template( >>> p Time (s) - ---------- -------------- - 0.5 [[1., 0.] ...] - 1.5 [[0., 1.] ...] - 2.5 [[1., 0.] ...] - 3.5 [[0., 1.] ...] - 4.5 [[1., 0.] ...] - 5.5 [[0., 1.] ...] - 6.5 [[1., 0.] ...] + ---------- ------------------------ + 0.5 [[1.0e+00, 7.5e-13] ...] + 1.5 [[7.5e-13, 1.0e+00] ...] + 2.5 [[1.0e+00, 7.5e-13] ...] + 3.5 [[7.5e-13, 1.0e+00] ...] + 4.5 [[1.0e+00, 7.5e-13] ...] 
+ 5.5 [[7.5e-13, 1.0e+00] ...] ... - 93.5 [[0., 0.] ...] - 94.5 [[0., 0.] ...] - 95.5 [[0., 0.] ...] - 96.5 [[0., 0.] ...] - 97.5 [[0., 0.] ...] - 98.5 [[0., 0.] ...] - 99.5 [[0., 0.] ...] + 95.5 [[7.5e-13, 7.5e-13] ...] + 96.5 [[7.5e-13, 7.5e-13] ...] + 97.5 [[7.5e-13, 7.5e-13] ...] + 98.5 [[7.5e-13, 7.5e-13] ...] + 99.5 [[7.5e-13, 7.5e-13] ...] dtype: float64, shape: (100, 2, 2) and p is a `TsdTensor` object containing the probability distribution for each time bin. - It is also possible to pass continuous values instead of spikes (e.g. smoothed spike counts): + It is also possible to pass continuous values instead of spikes (e.g. calcium imaging): - >>> group = group.count(1).smooth(2) + >>> time = np.arange(0,100, 0.1) + >>> group = nap.TsdFrame(t=time, d=np.stack([time % 0.5, time %1], axis=1)) >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- - 0.5 0.0 1.0 - 1.5 0.0 1.0 - 2.5 0.0 1.0 - 3.5 0.0 1.0 - 4.5 0.0 0.0 - 5.5 0.0 0.0 - 6.5 0.0 0.0 + 0.0 0.0 0.0 + 0.1 0.0 0.0 + 0.2 0.0 0.0 + 0.3 0.0 0.0 + 0.4 0.0 0.0 + 0.5 1.0 1.0 + 0.6 1.0 1.0 ... ... ... 
- 92.5 1.0 0.0 - 93.5 1.0 0.0 - 94.5 1.0 0.0 - 95.5 1.0 1.0 - 96.5 1.0 1.0 - 97.5 1.0 1.0 - 98.5 1.0 1.0 - dtype: float64, shape: (98, 2) + 99.3 0.0 0.0 + 99.4 0.0 0.0 + 99.5 1.0 1.0 + 99.6 1.0 1.0 + 99.7 1.0 1.0 + 99.8 1.0 1.0 + 99.9 1.0 1.0 + dtype: float64, shape: (1000, 2) """ tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = data.values From 789530931d608a29965f42cd56692fb9623dd4bf Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 31 Jul 2025 20:38:08 +0000 Subject: [PATCH 099/244] wrapper -> utput function --- pynapple/process/decoding.py | 155 ++++++++++++++++++++--------------- 1 file changed, 89 insertions(+), 66 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 49924be22..e1d39c088 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -13,7 +13,7 @@ from .. import core as nap -def _validate_decoding_inputs(func): +def _format_decoding_inputs(func): @wraps(func) def wrapper(*args, **kwargs): # Validate each positional argument @@ -60,64 +60,68 @@ def wrapper(*args, **kwargs): return wrapper -def _format_decoding_outputs(func): - @wraps(func) - def wrapper(tuning_curves, data, epochs, *args, **kwargs): - p = func(tuning_curves, data, epochs, *args, **kwargs) - idxmax = np.argmax(p, 1) - - # Fromat probability distribution - p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) - if p.ndim > 2: - p = nap.TsdTensor( - t=data.index, - d=p, - time_support=epochs, - ) - else: - p = nap.TsdFrame( - t=data.index, - d=p, - time_support=epochs, - columns=tuning_curves.coords[tuning_curves.dims[1]].values, - ) +def _format_decoding_outputs(p, tuning_curves, data, epochs): + idxmax = np.argmax(p, 1) - # Format decoded - idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) - if tuning_curves.ndim == 2: - decoded = nap.Tsd( - t=data.index, - d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, - time_support=epochs, - ) - else: - decoded = nap.TsdFrame( - 
t=data.index, - d=np.stack( - [ - tuning_curves.coords[dim][idxmax[i]] - for i, dim in enumerate(tuning_curves.dims[1:]) - ], - axis=1, - ), - time_support=epochs, - columns=tuning_curves.dims[1:], - ) + # Fromat probability distribution + p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) + if p.ndim > 2: + p = nap.TsdTensor( + t=data.index, + d=p, + time_support=epochs, + ) + else: + p = nap.TsdFrame( + t=data.index, + d=p, + time_support=epochs, + columns=tuning_curves.coords[tuning_curves.dims[1]].values, + ) - return decoded, p + # Format decoded + idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) + if tuning_curves.ndim == 2: + decoded = nap.Tsd( + t=data.index, + d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, + time_support=epochs, + ) + else: + decoded = nap.TsdFrame( + t=data.index, + d=np.stack( + [ + tuning_curves.coords[dim][idxmax[i]] + for i, dim in enumerate(tuning_curves.dims[1:]) + ], + axis=1, + ), + time_support=epochs, + columns=tuning_curves.dims[1:], + ) - return wrapper + return decoded, p -@_validate_decoding_inputs -@_format_decoding_outputs +@_format_decoding_inputs def decode_bayes( tuning_curves, data, epochs, bin_size, time_units="s", uniform_prior=True ): """ Performs Bayesian decoding over n-dimensional features. - See: + The algorithm is as follows: + + 1. For every time bin, we compute the distance between the neural activity + and the tuning curves using the chosen distance metric. + 2. We rescale the distance to [0,1]. + 3. We transform the distance to similarity = 1 - distance. + 4. We compute an estimated probability distribution by normalizing every time bin, + i.e. dividing by the sum over all feature bins. + 5. For every time bin, the decoded feature bin is the one that corresponds to the maximum estimated probability. + + See:\n Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. (1998). 
Interpreting neuronal population activity by reconstruction: unified framework with application to @@ -310,18 +314,28 @@ def decode_bayes( p3 = np.nanprod(tc**ct2, -1) p = p1 * p2 * p3 - return p / p.sum(1)[:, np.newaxis] + p = p / p.sum(1)[:, np.newaxis] + return _format_decoding_outputs(p, tuning_curves, data, epochs) -@_validate_decoding_inputs -@_format_decoding_outputs +@_format_decoding_inputs def decode_template( tuning_curves, data, epochs, bin_size, metric="correlation", time_units="s" ): """ Performs template matching decoding over n-dimensional features. - See: + The algorithm is as follows: + + 1. For every time bin, we compute the distance between the neural activity + and the tuning curves using the chosen distance metric. + 2. We rescale the distance to [0,1]. + 3. We transform the distance to similarity = 1 - distance. + 4. We compute an estimated probability distribution by normalizing every time bin, + i.e. dividing by the sum over all feature bins. + 5. For every time bin, the decoded feature bin is the one that corresponds to the maximum estimated probability. + + See:\n Vollan, A. Z., Gardner, R. J., Moser, M. B., & Moser, E. I. (2025). Left–right-alternating theta sweeps in entorhinal–hippocampal maps of space. Nature, 639(8004), 995–1005. @@ -452,19 +466,22 @@ def decode_template( >>> p Time (s) - ---------- ------------------------ - 0.5 [[1.0e+00, 7.5e-13] ...] - 1.5 [[7.5e-13, 1.0e+00] ...] - 2.5 [[1.0e+00, 7.5e-13] ...] - 3.5 [[7.5e-13, 1.0e+00] ...] - 4.5 [[1.0e+00, 7.5e-13] ...] - 5.5 [[7.5e-13, 1.0e+00] ...] + ---------- -------------- + 0.5 [[1., 0.] ...] + 1.5 [[0., 1.] ...] + 2.5 [[1., 0.] ...] + 3.5 [[0., 1.] ...] + 4.5 [[1., 0.] ...] + 5.5 [[0., 1.] ...] + 6.5 [[1., 0.] ...] ... - 95.5 [[7.5e-13, 7.5e-13] ...] - 96.5 [[7.5e-13, 7.5e-13] ...] - 97.5 [[7.5e-13, 7.5e-13] ...] - 98.5 [[7.5e-13, 7.5e-13] ...] - 99.5 [[7.5e-13, 7.5e-13] ...] + 93.5 [[0., 0.] ...] + 94.5 [[0., 0.] ...] + 95.5 [[0., 0.] ...] + 96.5 [[0., 0.] ...] 
+ 97.5 [[0., 0.] ...] + 98.5 [[0., 0.] ...] + 99.5 [[0., 0.] ...] dtype: float64, shape: (100, 2, 2) and p is a `TsdTensor` object containing the probability distribution for each time bin. @@ -498,9 +515,15 @@ def decode_template( tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = data.values + # compute distance matrix dist = cdist(ct, tc, metric=metric) - sim = 1 / (dist + 1e-12) - return sim / sim.sum(axis=1, keepdims=True) + # rescale to [0, 1] + dist = (dist - np.min(dist, keepdims=True)) / np.ptp(dist, keepdims=True) + # transform to similarity + sim = 1 - dist + # normalize + p = sim / sim.sum(axis=1, keepdims=True) + return _format_decoding_outputs(p, tuning_curves, data, epochs) # ------------------------------------------------------------------------------------- From 56b7554f1c80fdbd885cccb9825430ac7bf2ce0e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 31 Jul 2025 20:40:13 +0000 Subject: [PATCH 100/244] bug fix in decode_1d --- pynapple/process/decoding.py | 2 +- tests/test_decoding.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 99ea0f589..b9d92d51d 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -299,7 +299,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): raise RuntimeError("Unknown format for feature in decode_1d") return decode_bayes( xr.DataArray( - data=tuning_curves.values, + data=tuning_curves.values.T, coords={ "unit": tuning_curves.columns.values, "0": tuning_curves.index.values, diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 2876aa1c4..6f3895dd2 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -222,7 +222,7 @@ def test_decode_bayes(n_features, binned, uniform_prior): @pytest.mark.filterwarnings("ignore") def get_testing_set_1d(): feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) - group = 
nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(2)}) + group = nap.TsGroup({i: nap.Ts(t=np.arange(0, 50) + 50 * i) for i in range(3)}) tc = nap.compute_1d_tuning_curves( group=group, feature=feature, nb_bins=2, minmax=(-0.5, 1.5) ) From 4590516b24d74a92b694cfa8e8c81c978a9a24f0 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 1 Aug 2025 16:27:33 +0000 Subject: [PATCH 101/244] better docstrings + euclidean default --- pynapple/process/decoding.py | 46 ++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index e1d39c088..de497519f 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -111,15 +111,35 @@ def decode_bayes( """ Performs Bayesian decoding over n-dimensional features. - The algorithm is as follows: + The algorithm is based on Bayes' rule: - 1. For every time bin, we compute the distance between the neural activity - and the tuning curves using the chosen distance metric. - 2. We rescale the distance to [0,1]. - 3. We transform the distance to similarity = 1 - distance. - 4. We compute an estimated probability distribution by normalizing every time bin, - i.e. dividing by the sum over all feature bins. - 5. For every time bin, the decoded feature bin is the one that corresponds to the maximum estimated probability. + .. math:: + + P(x|n) \\propto P(n|x) P(x) + + where: + + - :math:`P(x|n)` is the **posterior probability** of the feature value given the observed neural activity. + - :math:`P(n|x)` is the **likelihood** of the neural activity given the feature value. + - :math:`P(x)` is the **prior** probability of the feature value. + + Mapping this to the function: + + - :math:`P(x|n)` is the estimated probability distribution over the decoded feature for each time bin. + This is the output of the function. The decoded value is the one with the maximum posterior probability. 
+ - :math:`P(n|x)` is determined by the tuning curves. Assuming spikes follow a Poisson distribution and + neurons are conditionally independent: + + .. math:: + + P(n|x) = \\prod_{i=1}^{N} P(n_i|x) = \\prod_{i=1}^{N} \\frac{\\lambda_i^{n_i} e^{-\\lambda_i}}{n_i!} + + where :math:`\\lambda_i` is the expected firing rate of neuron :math:`i` at feature value :math:`x`, + and :math:`n_i` is the spike count of neuron :math:`i`. + + - :math:`P(x)` depends on the value of the ``uniform_prior`` argument. + If ``uniform_prior=True``, it is a uniform distribution over feature values. + If ``uniform_prior=False``, it is based on the occupancy (i.e. the time spent in each feature bin during tuning curve estimation). See:\n Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. @@ -320,7 +340,7 @@ def decode_bayes( @_format_decoding_inputs def decode_template( - tuning_curves, data, epochs, bin_size, metric="correlation", time_units="s" + tuning_curves, data, epochs, bin_size, metric="euclidean", time_units="s" ): """ Performs template matching decoding over n-dimensional features. @@ -336,9 +356,11 @@ def decode_template( 5. For every time bin, the decoded feature bin is the one that corresponds to the maximum estimated probability. See:\n - Vollan, A. Z., Gardner, R. J., Moser, M. B., & Moser, E. I. (2025). - Left–right-alternating theta sweeps in entorhinal–hippocampal maps of space. - Nature, 639(8004), 995–1005. + Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. + (1998). Interpreting neuronal population activity by + reconstruction: unified framework with application to + hippocampal place cells. Journal of neurophysiology, 79(2), + 1017-1044. Parameters ---------- From 41139059a76a430b94e5aacaab2fe05a56ed814e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 1 Aug 2025 16:35:58 +0000 Subject: [PATCH 102/244] remove plot files... 
--- decode_bayes_1d.pdf | Bin 30194 -> 0 bytes decode_template_2d.pdf | Bin 16052 -> 0 bytes tuning_curves_1d.pdf | Bin 12078 -> 0 bytes tuning_curves_2d.pdf | Bin 19982 -> 0 bytes 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 decode_bayes_1d.pdf delete mode 100644 decode_template_2d.pdf delete mode 100644 tuning_curves_1d.pdf delete mode 100644 tuning_curves_2d.pdf diff --git a/decode_bayes_1d.pdf b/decode_bayes_1d.pdf deleted file mode 100644 index 8dd713f9546aefd7ed8f10204e16022c0449957c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30194 zcmce-1zZ&E`Zq43lt_mtv2?REEZrd~EfUfqy>xd;cddX*2ofR+A}NA&C@3fmOR6-I z!hdGb=kc7w?;PLX|Ge*q&oHwycUdP983P9LBP7$dgw%JP>Y(llfg7 zF);{K$J^No0+ls$GqZQHfj~9QY^+=%yg&mjh=c@=m7^utQ24hF3Qmr05I$4`sDXz0 zT`LPWh|uXzc`r9bZ8tMFD+vGT4NWsQH!HX!L;(DW1J%A`W@+na0}=fB6z*i9ZRG~J z1xzca0FYwk;+ zRVzmuw>uD?pR+00+5@~opbGYY0OYJJoGh(Cl3m^4R%Q-3UWoP_l@udI!tmW6?5kRa zOQn+ujBoQ>2<2{<1%;c@_E^wIhE>JV?(O<9R~N2Q#}%zjJ90#pEhAP+%lr^xH+Me{ zX*eD4tjzb(t`)Hu}M8)1cjx%F4^8xaCka#F%X%g8s%duhOky76dvC#JT;L&T7-)}3L@!QMaaQJT>J z$3|aYOyKeM44Y6-hLfq^kC7DjbkTffItKko~gB86Pa#J^Lxa=qYia!Qwk1}Zi` zt$8m?ML*xY>D1iFDEmNC;!yMZ_pwHy7afmwY;I}yK8Qo|yb$~0-?Fe4$+THO;P$9J z#Np%Q0>azK+0s-tuX@DI5ow^9Roz$3=SX$sQr<$@10(xx<~FyOSk*z`>%G+X_&k51tBk5PH+)$4rhfB3YnI;Kbf zpUlB#w~9iBnqa}AY$5*X#dMndm3fXj|I$|rQc`Wz=t7G9x_95s+l|d@9yJIFMz(m+ zJ9(@x$OUXdYO1J+g%n2@f-@?z^&z!6yPL>O?3e!CrkPXXQUiSy+lhTn9*5syl|grM z$ft6b$6^i__VyT)>|Ar5Ci~D89OHYxt=9RSH-2grO3u2DV zt1nl)ZxO%!zArL!_sS&Oh`>k6lEEcoW@Vhr=t1xEq+PT|4zwc$D zzDsv^W>NT1#ErvYHa6faVrN|k z-rEeJue|$+kSQH%D~!~<*Ji0%p_Wi>bL(*FUAslxsL&n9*~bY4YGPd;f_HMtx;)l> zgwQ=6Ke!wJvWU8>q|L>_ULoB)&14SC=gxqTqI%WR1Fnk7M*L;wLT+8X&5jqbg1rn; zzIN4?zITlkE(>~92qWS~`R;}kTi=x9l%M%x&-ZipE@dr1Gqe<3gNLxy z4BLyjK68Duy@(H4ie{qUhzYI)jX8}E#ft1 zcuRUY+H*}DmJ>~Lr=#n51n7;|oCz@B7~cx+7yIm{bI&1m{5$*{<7&C4Kr3B!$c-?w z>s*V0oZsftcwn!Ltjt&EQxI%SOczx)meDFh^6ELCFw@rDOlBP+v{bo$-qOJ_=U$%= 
z7Y4}%s%kGk^s!NGLZVT@cUOA_N%ze2X9aX_=NC?Q-{y0+Vj&@#%j$e96q~|q=9r|A zzvGzmQ0c;~1}$Ud4b~9(G86x-tWFFvC7UMOA-Ck9bE$8}3}wlE>V=8pN8gx?mQ+); zvKw~3CfreonjCn&Si?q2D;w-1NY1N~$+bohO_qGVrram1IP0BAeJd>vC4nDXB?AVo za$Hhr$IXh@TVaN1gZ$rgmBxz(;&EWbyg62jEi*&UDS2-*9^w+8U;X~+T6IDN!vL>U zKgN=b^}4F1VkfC}&+1JaDV4{GmzU|3YV!h0?}nf;F-MI(-AD61XR%UFOGFg*l{y1w z()=R3RVg8t3jwVW&f!`YL5*(6@GR-2^qWzNNJBPtO!8JNgKy8&&2Cw&?i6gkAWEey zna+bnXz=N$J?Q7Zxbv<^#nB9t4pR+%sA%q?1`xZzr*Y=;ZH_UfPgDyIX3IE;Wjgm}VeF$xCASPN>9j zB9hokAfv!RzR@}SgQ^4$D0@Dxk&cC~zUj&yP zPybql`(pEaDvMNRZOvEGLM6rFGg>qu^HAJuB*~=(`yD@_pHcYF6nH# zwRui7+;Nu0-xz`A!c6#;o~Q_;DjhTJVzL8SPr~Zl#y$a0l?=u!Z|NIGUm9yzQt1@v zE)IW?R%_MIx-`(D`xZu0l=>A>Uw7}lOc$^BEwky;X&jGH{hC-6lGOO>Vp%A4TW+#` z<0zJk5*eM<)uCj>1l}BFdYPHX9kz;F0{st=Ow2=WuoR{_%JZQZ2nw5!acNb$onGX1 zEd1AOL*}G?LpnIGa?pAs?e8(hitCCdhPW$>#?bhdrnRAyP!d(x+Y7kWEc==%q( zrJNm0lUrAgf38Z>JR39g-?qVwR$~v9gnl zsqQNFDJX`q+FvluL^!-=E#<|77F95$AVv}IeQpqGGdvDR*5(cRS~SZ1Lg~$NmikUU z`i%?hWvqj5c5ci``AuJI79XGo03vS~j?~q>HCy9Pnwge|1 zm_*Ydx3nchbk*H((mpWN$`Rp$FjuN?KI1{aI=N#)qxV0mLx@lb>S`(>e=H3gg2>4thy3c3r@HIW=(zu(J zOraKD4Absk(QeV+*L)jUo6}K36o-}Sz@mOv>at+8@_9aWdL0>FN}HO3>jZ}14F%<} z@|o)xq4I5lF_7SGKk?7L3nYr-0>pd-aANj$qNtDOhc+D+Frz8%U#2d(DB&tsc1=8zS}0xx#nYs1~nrS24^zmE4pIK$--gZbdyD8PB;_G2yxT0<(I2i1nlly) zR9|XTtuVP)!+n+K+_?wum@x5Wzr1aFbzSbe9O3##k8PaOnyg|JkzK;0$4{c^cBj)c zLIdv+qFWUYmge|Hojafn56b^Un2lVCkLAup(5}MS(8?>QOyuqx-ROL4qCUk$XSVew zr;K3nL3=iVHuJRG;-Qt5q2!QM{Evs2S1-~DNe*ewgc2y4rsIDa;D3l?wduk3WGm_| zR8F+3e1(TshbqoADt#t)W+Cg9ie@;SNFn6X#PY0!Rp3Y3?KVd)o0v~{0yt@0JuyrX z2~3g=kd_c9~?Je&jO_O4hl}l#aA4bW`GnT5- zya%!HQe(LihjB}9@L(CrK_m{mZs#fuWyR*-=XFvPm5=I7czwumo-xz?-nyU&=Q*y& zHtOopng@n2;m?khoEcn$An-s| z%z!c6qk@I7aiJ8En74b4&8h;g{pPAHRv- z?5K@Tt&~8|q((0*Vc4nLoe=Rk_IkHrL?aSE9N6=yR&oL{9(Azp(YqP^mA>{)ywDRq zV#u|!w9B2z)UgUwNco6?^bAWwQpYS@BJ!g>{YI?(lVohED~QpfmsgH1i z8z?5cyv*OT4V+vAJ*GQMBNWa(atlsKf$6-^xH z$k^ye9dg~z>&3>Mdd2r{qpOkn*P=1*guP{LZJS$bG_0F+uiNAr%Dr-Aa#_cFrZsl# zp>#uG>pQtwgL`i;QqS&^O9q$`X7+j%UB>>HH~;R5kWh~)^^d)bj@>Eh53cu$=1Nk= 
z9<3Gbnwl3bo#z+$u;iqEROx9DzAgf%-q_~-Chd2^sTcLoaJpdi$>fuc7tks_QB{nP zhlZc+#?9LM+Q>+L^x}+FK6NaBdfLok=OUhFY~5~O-yP|c9J820+r|AR5MUr(1ce?i z@6>DDOPI+M*#6NGIZh#&n`w31Gtz1m3wHHo$r}LQ0!XR@(I|O_~oD z1J&(5*B?9`ufU&sDH2vix&OITW|LuTz?ofYBDj7G?fi^#?Rn&*8FLFsG6}^kLk{Up z&y$OB?T|UJlJ&C`^RuRfs@s6I9DaUb?q7v2RKW+lc?+lt{cOk!7J>e$=)=n=091PZ zL&ZnnhO*;19<2IB(qpN@r_5_vr7I79oV^ol!)(8VM? z8qo&KeYyn>E|Db1?>VfIX$7>xoIy0v4|E3bDhZhatQowzbJf=#@i%;z9@=U2l~#0Q zvSh2(RA9?3<9{3b;2`b}Q_0#%aPCfH)A2c3HJ!SD6)x}Z6}n%AGC}_TEHm-M*MzdP zSaNH6Rg6JFQsP~f@V({}JE;^SORC$uC8EKJHzN7o*vxi?4ps3xJCVlPVO7#^W2v1_ zX=!_%?S*>@t=KHdf@?}QUAH)`B0Da3q|rk9gKdVuWrxqfo8<|Ttpbye)~|l0mcguI zadWgXyp&}iW`_{%ebl=cpg!g)ex`=8Pd1h@pS=oR%F) z%Ndk_rRIO4gpXV3Ka(P!tm25y0|;LtoQt64;8X5a0n4k*$-psY#^rmIpUC9zroU3X zsZE{8pAr_hyzX3ee^b|h&dtZ`^N&XDWCr%0vNLWgjYjannOE<;T^R11O+22AyP%R#affzmy3+9@*X;&v z5qWb;IGNdHMgq%H@;P;1_AJ!gg~XLf1=?`r-oxS~pc!n=kh@ zFt8hjFQ5;dCstf1lKmQ;Moh!&2VLUztK}747x|Rstnt#(=#AAIKC@fXqHH~)CbFJ2 zT9Wr4l&)QcO>$`oxLH}|`8>$J)SDPpL3q=RJ;PmUD0Nk9=BoXz5R#qiWz6^U`IiX0 z*F971{M?B(Dyxt>eYA#s<-}L-J+=3e9`0fwd~rF3Io*}^#(wFwCn8+8Qy6ErgU0jI zto?knlnJ>*{mQ!T^qv2)*ck#7u4Z?#;?aM#HXpmgFKHD zY*1&!T}8>=(@WIG%P%BrzMM@VW0hn8nS;N)jj`C$+cVQWeI5NAbu?Mp83ciEasG)Q zLD(M?#IJu!kRbPsp9F>QxW49iYL7>*N)={>VSa@o8&cI9^fs7r1~-uc<~#&{$@}vC zxq}ii`7?<5{guo0hlg4-!C zC@x0GJIY8P!d;KyK5G3rx`}qCnqI3LPdspSwNhbITFhh>)>4nGsC zd-$#>&l5*2rkAx?&5@?G8(@gvs8iwbPh7Eo!2FyW-~bTa5AoG$bqLQ8*cq>;c=>q# zKwuiMKCG+`3oBq?(J1YXnF4179U)sVA<}b`23zWqD=M(?7ND7&EklSWj!K zwl{x&j!#^=J55cO8gt1kBFXt<*_>l;Jo$-7+JFJ(n&95OnK80Tt}@xuT*S0} z%WOpi*_vh5kMgx{IG0EiFO}s5&YK!>{;RzB&dTce{{ZgdnxgS?w76ej;m~!4O?qc) zg(sZWYf}~I*{qtX0X2LKgM`6~xIC$qbByKr6~WChG8>MwUqV|UPh_E@vOOQzIk3`9N#(x zZ!sr}BOx=xg5Lc~TH5uK_(> z{IL^Ll~IqOOVnLK_BSws86&+Sug$z0!=I8DB(<4Cm};?_(`ITo=W%OO#<6;CS|kLy zq~7HyzT;)0(y#W0oYRw-v#>lPg~I9OYR0Z&nnTiM`2o&LG9&iM`@F%JepfOT-REoz zJ0`x{SAIV_*Kg0SdoN?mF&l&Z+}xJ2xiwJ#MD@E8j zw;K)7VAfV_{|5QAj>#P7pZ0{^`M&-4cPiX?7kbN z?c`0U^ogJ|iS`Orh{t`Sli82^jVCS!Cnssp(cyN1+!><~& z_YmDYQ+raxFTD-VJ7V6^Qjn2N9-8u$ad!}B 
z!#{d~WOw32dbKQn#vNF|B|f&&Yg9ujg4xb&#IN$l-|!t=c5p-2i=?i)1sN>3JyHXK zci>;aeVs)dLx+1^4dbK&o{(n}b4}vShAc@^GTwr~iNTLe*UOdK3Cu~3PEE5Wh!Q+> zLdxXq26|(IDlQ^ITLEdcffhr~p!!;q<7?09U*Sr9k8X3x){1}7Z0B4!0G(&n9z8=u zXQV;Ge~<>r$JSqN;l`5lExLUnfa$x$jnvl3=F0?(jxU)f#Eq!1vsfMZ-pFF);&Mb7 zy995ib6+x2)QrFX^^w_!rj-kEJ4ISsYxt?TU(04AzM!3#xoUj|X=fxvJOY2RuG4_w zz2U|k`+*Yx334~6nM|QuN<0oMU&^PtK$!2<`RtL!MLeEhVfrE3X&P3u;b`x4qW0$m2< zYFvw|@>kB{W)}q7eMlE}xGkcS74vjezMx$s-h00vxazp(;g5E^of}on_{RbW-x(JKsrmub zHRW*{g%w_Bnf8QwXLkr`CiyUYYT#u#`~fj_4f~siqU}-e917dLA8Q3d#-`(5+#k+R z-x(RvpRmU#_#ANO-2B{_$qErt=}FNt_h`wL&b9TPF# zndg@?|8;8q-;;~e9ag`#N

V&D~IYt`wZ$4iKo!>3$?m9&Vs!!pSel2jLNd@dLYx z`1rt$Nq^N&{s4Ari7GgNUM_+w+S^ad63VwOTqpbXqt-w97S;vpZK5uCws02L0sq#G z?_Am{L(`EP$h>yC%a7vXX`f>l_hqs&t1|N8W@ERl=-&F^9%1t}hJ&W<+TaCqrCmca zEgL_3T%4>62Iz|+4@h+vjW01&76quf6vkhy;uJ~3XTFh)WvF%A3~9Rvr#v*HkIt1G zxaQrtujc9NkA^-hI(&wd&PpHz|A0~XL>Li}KwNNb3}IF>3fV2|dGY$-qy+P$;=|?h z{FJZA)Vh60?no?GDLi;JXt%_1Jz4HGSu&Mou|+ zX4m(MQ9`Zy^CaPQeEqI7EJJz<&&fOlmF^nZJH`JnxIP%}XryqDJ~6L^PAtEmtCtC9 zHUU8mXG1qz$*ypNU(=JWB{nxf6S?1VdM5fv|w6lFEjt@6i zH$(%D(KuZFT+dL;8Tr+p1ZR|k2zo;zTfu*hY5`sAjFHbsp?Cm;{*NVN`2-cb3qbjE z7x6H@rGsYCg?BmJ_2gU7eMe#o@o=5sx{wFrTz2U9Noi;)YBp+1(&OcCO6d4#>EU-x zN0z@&DID~tIPPs({i8ICTERX@PK9cEoTWs$)ii98v> z>c+&l93$msIGZ1v2jS3fm`{3g=gkx!Psr58$N=^c2Lq&(4n)~PQ{aKehR-(js7oex zZ_3A~xYlL(N3pEqOYJi}6gIUZR>K5ejVr~oq`S(D+8Bw+RX>=~+u%uN@p3(P6&Hdm zDi~y#B|7R|pbhq0B5i~C3DrnhfJogu?B(i|S4KX3tRiOP7gv|x?6mHcrSU(2FcbLp!) zYt7}dnC`(xNQkqT*q{pomz(8MY&?4o-nX)}lD>{+z>bbS4_`{ayuUPF*(e#6U zyy^&6Y27y<0crS`bHW2CvX*35q@-c%kzZUV5*3B8T0Xbo=O??CQ6RFX+Xl&S3kj z6pH^3at!f=*FXw|CA+qdb0SqR%KE@hqxf#*>PaQNO8q6D^A@q;gM-&fv)@GA&Y5d} z&!}U!N763EDAbvC8?Uh7J(XgTkGb#_BkJeZ;%BRt4hZX$t^Ehz%DiH{6XaW_Z987% zLfl3raB+oo1@WkXpf0b(W|Tj9g1E9H4V`6|@#n5C&-aWn{OHMsFT&T_Rbo_$Ey;?{735yrO z88x(v(GYVM3J&SM5_w`WhXLjSfo33?pUn6+(WzbdHvLL42AZBQ<;c>=dYZTf(>pg( z-r*+#Jy)=a<>|!sZuN8Lh+TTlTjpI)twy#vRc%*e z=pP)TShkWCUBX59{RNZ zzfH7&*?}q4%-rC%sNLaQ+}vPt0{s1T1@00Dh|>f%tpgjwQTxPkpz@BuK5uYK``=sX zf8D}?%Gp|5TLBx=!5!$gAYk#*)!EDf*c=b==M3&tx3{u(`*{hswYlR4;RmWG&Q@?+ zCrb!FuuAl?f;&N=PL2RmZk|pML167K3k(Yp1_scC2m{_j2Lc0($sBHGVQ1xr8V&}? 
z;pgwuF=0S2GvFQsY5{>-LZHB4*1*Pg0Ijw-Jlw!6_7JE8ga_DD4|M`gz@Tt|P+&#~ z)E!7ef!p3dA`0wehw9r}y4?YIKi$6n_tya^b^IzE|E=!d6o!)S|DYW}^!8>pu7G_0 zNKewP7T^|t7%xz&17`q#aPop>Cs{LRB`cH|xpANdzb=6f2!Kx;Y%Qc6ZS1W;WNEuu zIp~2GYGz(2jvzdI{Jg(^UGU3Q-+~BobN@FEod4}Oe1IkK14!ftvSMLiy(Y*fhyw#w zph5rwc!dNYd@yd*App<`J_p3_by=0x16lgJ0_Ubp}+1^NSD^Rs2FcAloSNK^*)8sUS}%Fadx9d;K*E zNEA0W4(i#@0bsz3pI-+Egi{QFIQYi{C2If|ASQl0P26O`A0*Jo9AHOj1>kJA7&^r$53|{|spmcru9BlK~ z0pbGGDtP{DGYHD@|MGRR5Y*lfAQH(z0NtTtk|u-~TtnL_}T z0{3u!`Dm2S<~{YOzwVv-ZQh?j&I0&G>2w%NK%A#nRuI(d3t$*)034_HY{3YFn$8}8 z@#&QV1mFjC&j~P@pI1OQ0Z@%1;_oN~7(c2V>dq+vfLcy(odzVnpL4?@K>mh$;0gi! zCTg6YaqZ```7npP+4hUP^g6&ga;fLD$V;pHUk3O&t|`o@Y{;#Z@PZP1t8Y{eG}y57ao)W zVt;b3%nJb^iDFU>^bn_)3Ot;DXFQzFwtzjN9{fEVs{gO?PGjl6R*q1g$U#oyF)$LN{Xt zZ^a52$0}X5=QE4rwTP1y3FWqlV=4>gvWw?*isz{EXLpTfbB{M-^^P}U@r`Hpk7o*w zXM7UO5FSS#8AlfrM_U{QNs6U;5J{bJk1je+;gSi|HjX|zjwU%)@TDhp<~<`;uXy^$ zp^8QUAD1}k#-Dqgv(MCHU37|(ASM}0rah_x~hk`SxF z>8I9er_yTg_g+JZ+F6m!TLITp9_yCed0kntFd3wX$vHT`+e7ECx&Et41O#L-8^(cO!qjgN&Cgwv$Q$nyp&5?RTi>vOooD?wa1oa0$> zfb3Sr*3DJZ3NGv9;0&akKw5S7dkP={MSa`|d~508in1zAIPhJi1u8KIYz~D!AWKAD zxjO)v{V77v7#_6Dp9N0f&FfDm@Ye^E-zMyu1>}QsU6NcN(V377S0H?k&~Bi~tA`L! zX^O>%c$r@iNQQ)PV?Cx<78awM4uY^jB0}$B5;P-QoXQVPdP`R(jg56DJ~URI^fZpF zmabO3`o1;0JmKWyIZU{`fA2oyZdW)QA;z(>VZF?q*k7Lr(3Bg7*ri=;Puk)FDPg8v^zZa`K{j0avu?Wmb@%B4=w_R*mp7&^ zt6eI)I!1x? z3q>d>W-whmMySO5aU?p98})Ro{2IO(Wt*WZ-hc%-m+&ocpk zPyMDdkw5Q~6`yOOZ&b1oPJhd4Cvi*sZ&`^W{qX;m6+UcA_Ae2S$oB94SvQS&#NmF* zK$iu4N-xc&F@B}l~>r>}}_&?oC@V!B6ji%}qrd^gwaIQ)3+ zyvQR3YWO;T)3pQ|yh-{mLNtPYr=J+Icb}yR9IPzRd1E6JsE^zt1eBl9q+L z!;Uhb6Ix=t$u|k#e&`&@O^s{Yede4k_B(kr4rxm6hz>k5*;w5`gOWE+EJgKty^8kZ z4UkW%Z_2FR=-&(BKV@Qyc5&`+HLoRZxq$FJWyupXuD`G%3Yr%H=2Hf#Km+@QoP9jd z!~kfc*!7D9jSR#Q0EmRPwd#$P4|8(lJy}{-OtSYmcQVoyy(BapdJDY8AsU^fxkr!R z_8r&l!OHmB^f#|;BuFw(ic92A-P&2vyLN(cwe?9i^CMTU9g&S5xA+SSxyQ|m^}Bh6 zRrjR<%)(#;@}Lt)1trV8SO^}D+1DXk49l6oOfngyg?3{qTs1-VYER! 
z@3XbSog&s;OFSKII)uY$XPs+AH1>Iu2b_u@DUfLGed218uKK z7IaPiou`2LBNa*AM~F@aXthker`bM<8mHH_=1tv=EH-g?ygs{!f1rQO6$^7ggGgc} zgqkL0u6>~U^8QBkd5LMo`NHyfF$H8``M#wa9@JlZl2xMopx!xEs0V7fO7V^M|5$vC zpfz-PbAcYDSnhw)k=A|0SkG*wSpAOg{f@241MDrVCSeCLW%t^>jUvH?7gzM7 zP8_Ch1C%;{It&WfTu8YUaK89+S1LSt4Pf3j{Nwiey`c!GeCKNBq5^yvUA(3w|w^ zkl)Maj~=@B`}2xKj+rLfw5dEj=Avxho$=1{Rq*{UqV^xeuCvcWSE(8(54bSxi>4%j z9v_?g0^^5iXd8s95Aqv~7job6B3LDf1KF!zNlZW4+}8UMIl;RRV-^?OE7MR|T;FDH zJ9)T8v`zg^(yevM-4*fS13aRaCx#mIz|4!bDA8R69dMWTRZ!D5(kN-=Mu6sY^HdMT zv|vmo`?D@O{oYD}`N#YljCbi5*HSJfaQAC(Vgf?ynfBxo<2u%}212f+uIrHAql8ai z$Jv-;06LwA6z6H$R0e(JBC`Y*skca2kEcx^4ACw60-iwwj1K#qw3~QfYyeChMO%_A zk}3iLs#)wZs9E~RPEp+77*WJa^1N~i5PlF736yke+`q9v1CyIHJB8K|7;q%uQ4Mc? z=NC!f9hoJzxa;Jv&qc03u41-Q*VsRFa~*5$NMSXx&gMC8akqumN~c7AUX`C)PqWPR z%eqld+$vuNNl@H>^C&QVDmo*7Hww?y^>%Buh)MuUyUon+JODm={QtpkKJR|_=7cn% zn5n1m998k!k^Pb)P$@f`+9;)n2i#8;d4_*QXY&rLfDY|J$LAl5F;o1=6qn@zQb~R! zzj4u3gDfE)zwD7I1mS=-2mC`8s{^N zIH7BwVkM+rf6Td-rX@=Xj~4C?B+QHLpLtx}ZZijv32*(_2$0#WA$6anFy0SA;&UPH z7eM}bn#-`tSip7gBY_|XOq`a4g4G}MlKlZGwq4vpS?zV>I?ZT8h|V=Lp|+reFbSxu zSV@)CE5|_kfQX`DtQj=ZPp`Bw#y45_vswZP#WA;|K&!kL*nRRGv`Uz_-A7?R)gz4o zmkw;F^tDHko%MOAAO)~UfC|itfLG&2iphbIxC@|D0vCp#1SWKFKxp`~{j#XgP6>$i zKXr*@Tg317wTi#FR~7jl^ZO9-2ZhngJk#_GjoZW&&R4#2&6BsS(Xj}~cu6^Y*DkE` zb${fiKy1TKt!Sv4j2H0NZLhO zRE7}`h}f*tI|@uGQUHDXU4c{6f$3L$pL@eSlTMT}mX%kLX zn-p2xraKAHCEzh$BDL2yyvtK6kV00iRRg3`19z{{burItDSuWl2{B0(nIsMFw%JrL zP3@iY8rXk$bOWGx40dYuzo-NP`g2+gzc4VjK#~1_;l`ro{L$iO=3ZCm9C9xxaD0Fc z**D9uk(pJ@7;yZWoe=IR?#1KI6nIE#faBfH*f03H8tV!1 z*c}q4ev)r4^F-2gyC!eXnUGJ?`#fAAO(y_s{o$GpQl8ahq*L7SIG=jjw z;RcM}m&cmG>VW|C@klYWpNwDuFe63gvJe3EJ@Z+O20^QPs5#9U3Ho$h`Tr!o3EhQVU;&ptZQV`inzYi`wkr^5vx3m5z;c zQTgjsH}MsJaBH3{b0#G2o0Ch%BSBs4kbHez%?(4t$ZO zwdA!Xab2Apf1|nP$nV?KJ;~Xrth{ustz_;=labsF%5Pn6S*cBVtdkr~d6NXh$EDji z+w39Ky}nuMN&`YuSGOX5^a;(qTEDuaRVLtBcn*E}8 z*JSqF%;EI6H3}&ABDvD?Vb-kJ#*gg?mLFv0)F#2r#A`EJvX2KdE>5x^muYi5KEBQA z6V^LL9FRHS+wV{(Ef%&~%)X$;XUPI8%7$)n2S_-mW&evT@~3V^&ZBR-cq=ziuAe+> 
zmN^=OIGOqO$`mj9-dyskFDB+9!QZGVFGdQ@CWKM32tYS*cKoqy5Op^erg!B)JvGSt zNjB0l_13K|(r?nU3bdi$!pEd04HR4s&e42ICN8rO?626cIG*kq9XSXpDc6h+MA!e3 z_)$pxj{;DYg*EF}q^xxoBatn}n>RP0!9$kAZerFChU%4YsGN3FL&+l}U&6n{z$5qQ zC&~J?OT43}9!(#vmwLS=ZnAqoBAKmw5Ky#kAqQg5`-w!f{`5)GT){Ab&-2B}migHM zk&AN?I3LZ0TZNrS*I>2n_~!d>UgcF}3iTyE0PWJ6gmgjzJ9Xh!|EofSjRYhdFIK4E92JWRicWPvBJ+5W{MY&RO2(^wWqO}#82j4 zJdRr<^#zS!QJ;R zIIT~Hsv{eZ8_CixB_zV;ncMWAEhfqIjfVOT;Zmf4-W{mPVE^SoT);xWPgf6AWPpm? zPnQQ2WPnP|PxlGbV}PRGPcI3SV}Lr%&$t0pV}Jr*mp5Q^LZ+CCO9n9Lv=D6rTY$$~ z=+&2Sc3U!PSEIF~m!VD5zFBUWgD#u;vyu@sxHI!#T_5XhB%YiLbf&4;j{T99r&2QI zQ|Jx5pzj<1X)l-rP$ZVA!+`M7MnTTn`!A74IeqMdKREod{hOdBBI?1lQ9?6`#NQ2v z*>kGKfdUs0OE{oi&}je)JX zFsv8s@vXjcS{SME3r=}^lpB_l-ywVWUR|g4+M0d~g4QH)E`p_(`G^)^ToYs*bCYSq zVViM%hsKjeaYH`f`bq*!%B#me&F{rY%8`;E@60V9zY#x&ADoSdMHQd$)aL2AJ{N4G zJ5Z|PmRoH$^WtAw54VpJ)($W9R?tHCkX}qvzxMadUp!Z9;N5K7@VivEy~e|F^%U(7IP&xbeK0d zH5#9=sEG{_ZOSq=6HH+?r?=C5w~>G_$B>r~jE_jV<`urwHc33Nz#}+L7c%{p5HppB zOxZ}0Zt85Zo1{PfSc@FFb;3|xAtkwoESw|pit!@$!9OJ0+n(ewiRqRfc%&=41pjeh zPT`*EJhbf=g`kN7>-a$36JQ}_>)_NuQDFSX}aYJ(Mr|VaT{P0#^ogf;U1y(x?S-C}~Y#d26wkbF8_xS>b zXZdEWef9Sbrs#GCR`|V5K;>!l`@h)X?;RN@&Fh_*jrm%?AWqa|4y?pzy35$ttv3KK z)F9u0-5BYQ?Y9u196s_8Z;tS-nr6nA_*}Wm1diFccu8C6Nn6sL`(c3Z0Xd?EOV9#5 zfE)qdB?`1ZxGa+CPlac9YFWmq1M&>`2xPP6JtUK-!7Rf(jv?MRHq+sc>g^52cOsOzMU zrMI)A^z`$AAn0?dquvu;pO3RF0p)K!t%l87)rN*>;QEu`OymxuNCo$V%;e-c3kySQeg=+Kt z$NXw+F=h#kle8Z$m}LteIq$Ej9JH1nM`ZGPe&y)}Jm0=bEf-T=_jc|XqaVK@A?xBr_0Xw_beMS{gV7g z8WdYS!Q`Qw6F~{V~>ciFKO>;4Nk_Sdf_% z15~hULqQ!djI@^T@1bAp0}C;M=`%BmYL0ry2C@HMlYp1-T!wjn!nplRO3b+tp5V3_HBJALjH~L=$uw(oIC-Zr7F*NVs9<89PE zIws~UD0txa;>UXSR9A`{;=r%<#Pv(|4Q9#%yA3F>>B&(0F3-Ha(H{JYsYCOAJLdNs zPqL%RD|xvZ3!$fueKzuH=v=QCTXX~V4^qI`ysvcJFggdlUl7=U>)hx?vvta^Z%>uyyKd@x(5yc9H0ylK}C`<1p*n2Bp{${iGW6hh|$CXVjMsjDiB5r z$V`fWK?S9bRM9GF>udp2Tgpxmhf4|?)s;eKJ zb}4wN!aXwaV7fBg)Avq7KR3?Pb6NSdu4*&ps|S4a4-V_Olasj2fnae;g8DCf-HUr& z=$%SM4#ihi7*^GGgym0(?w&pF`?JkUpT?#2Drc|6OlX)5C?z*Qmb+k>SUa9C)35IO 
z!C@1bfeWII`e?8YjYu?W_hk%8ft;1`1cdbpIzLYBuS>$65(Ks+c4|9^oo}8~GaD2? zoAXBTiH&muU$1|ocZpeg(EW3W%1y8L69d-&kPopinp4t zi@5cieDsa3!@i*F#!jUNj>XjRUvE1d9F0_bf4sHXCy@o=KRCvcKO5Tir#_mlJzDx41!;L+z?IQ(|WH2Q>6~G z)V%BVU|!3*;&$JttO{J*Z0nx8o!a+LIgNflGWs%*i%wN(iPm#@u66MRI#Yu=u)8@tar<~w$-Fq?w`o($ zojYDpU;eJ_;ityTXDluBwbmbR)kSz6Niclcfof(sbT~iHH-*W69^!Ja(CCat(&pfj zur%3}fvkUn%fia!FcuQ<;L@-#InJ$x4Dg>d-(}oa?sC42D4R!FTs^+Lq_ zF_KPwxvp31w(Q{xFN(RQ6N%dDJ&2|a-HL~DR)dV+O$)>Luu1*)(!iYG%imbHy->}S2qV7VeD+O z*_8h;IdR8pr?sZPg@grpHuuO-Z%<%q#ik)aU`a&m(#l~t^*t}@CJr`F3bG?FC!AU{ z7Fk$R68RFh{gicv8lgiP$Al{}NMVCXj1a@X90nGG32Y+<2{YKo02T+k!+ZwY7+^Bf zc{>0Qo?v&F+h71-VgtKFw88*DsFu4LQIjVd!s@pS(PWGULXlzB|`N>1q&s@>MwL5nE3^wDz2OgB}(2Kai>Ix`x$(dX5Wt2!QuY0d7 z$R^m`6$_)Pth*~rked?7S+I1YS{ER9UikEqay4qiTq-&ZNnbojFS(jlqT$*|!}dLO zZQShI2)6)`$%w`FSz-H9^M>iQ<6m7mJRFGn)n`lx{mZuN0YQmlqRsmQx)we{o6&+_ z-~O039?;};%IO=_RN+{hq5tD1=E!Hyc5+Pybf@W5zb8#7(O6RB=fRUxhmxoDv>=)) zk^Bf3*0E`|348B#})6ZSBpBiX1`AySN%xMU}(MLE%PXTNaJ}; zd{EmH$n~2sB|5>++7DXGV+UO=#*Nk0RfxX;x>hSjl+*blP0Xfy=M2xoBIMfyl*$+1R z>m*-m#*Zp;O(yT$`MP^!nTuLs=WA+Bd*GX3f!v<_`23Au|D)j#T@1_q@l~BQ2F?~q zs-F&V3cL2r2A%i;`1aMEo1I?fst@^&SL}Zhmg%B@5gFCVkFhRlQD>SL1 zUD>!9zON2OPJcb7&x*7AB;t?)bLfbz{W>IcY8HZ{E-7A)gibKbpeB6ZLL_vE>7m^d z0sZ6kT7%kJ1DZE>R+q+kSd?xvoc0-VH`8CCAAPfL(*cU7wJtc`nZ25<&1W91PxhPW zh}@&Qq=hwpj%hGf2>~;3-1Y7%rz(E*yeTtT^uoe+h^&41SIShxb@AXPE-{g z31`^-_m4lkoLiFNPfBAAo~-n`c!34ZzYwQnrTb(#EV zf&b$hrzbk3P0!NEFBS|QTH*U^-k0em_>6%*{Zy8jELPwq3|7e9QjysN_6Ypb|AQA( z_w~kCeK7UYL{Z&?>Qy@?#|uu$_5n|PeG%EB>@^fB+oGIGy>LAK@UXP#@!*TVsUWUN z>7lT0^TA)|`k%a`ye=Ej5G~sSiEJE_+h{zC8X?I|6&aQ2mU42K(ezzum%%s z9{+Wp5pW2uD-SrX%Nguh=8g&&=+XbBXDJX4PkNSkW1~${1?Ur*&OTXfU3wnMfsecN zFa^1fx)ytBg#fE?r$hgC$Flkkw72QLn{7stHj~OWQ%rvDD%T7z&EPK{Te&%>iyz}n z(ga@W+s^)E>#fTX{bX;FE;#n1WATqGJZA^3^vEoDuqy9?n};0_`!PT3p0_t(3f{)C z2kxylJ|I9B=JM|PCg1fVg$Nur+?`*s&`yTAwNif_Wb@QI!_$sS~$`;AE0;_nJ* zMP(Gw7zkS7n++9O9KVi57v)el=2EWjBX8#0Z{gcrjkDdxw+ZHxLIuRj2?UM+ze`~K zCD-ahKr50gV+BBOAm{>fd7{AV0?+gU&*c0^#u);mvQ)z}=~z)Z=F|ZMtvHs202oGD 
z0xyer%BEa*$250`RDe8OytB}UbAaA3$`n}a7kDzmw~X~-U-V)|dJVi6hV^wX?&)5P zK9b$pt;aM^5CE?6uygUf&Lz^$rC!V^c5OR`9F$w#jzu5M_SV~Jl+FLDmAU*{E7SaP ztEKm%9T!>%=UPbK=tw{b3K@55YO(${{sbIr}&-T;euS#v!l5gwF}0JA_J&vLE$aFu(;O3zG}ifHFG z*-jNPSOMRWp5ySMPAAnX+2FLABI1y)}q z{>E=4GtJ|HX1SPRmLf3yNMQ0Az$mHc(oDm%Y1mKGF{clpPi7#j$-!JN?NGp7-Y^^< zDx0h#1;DqUKgqOZ^H;3Rai^yKyHp1fo`24C0GB7vbX3+?FVNhepdj(u=i_Z;WiG`m zla|F;l*OFFoH$(=L-t;sDxEJqF-Q9N7~N*-rh(pz3T%aq7EgqgNY}DCdg6sTjBIt+ zd2I7VVq1O25=9&zCG1q9&E<*QB~m>$2Pd{wXZWb%66d&**c_DDb}qw55hqd>(iLfU zQ$+m|={$D75!>8JY^%mNI^WflZLTJ^MKfOMFzQutp-RFFD*>- z=ZUxyX%xE*Bi=BF5u}JqoQ2a-65dy!JxviUkVpq4^wpX^inOOZ5lKSd#OA1pN2uaU z^BIrSaT`&>y(rotp2$KX4c9z}q9vq=o=E8P*&MWZ#6%o3hjC5~w@gJSP@?fuMA{Pi zdQCq?8cT^5mLiIl&@I{hhT@U=;+VyZoocu(N$Wnf0YEGSxeis);JbQQbbk=?H)zNEl7IuFNJ7A7eRmnhLL zugX-$=_m-D6@>KEm&Xj0Xj&!yx z!f@WpW5-c6GEX#LB30CMK+!CDq8}vESayE`o1-aiHegt2;4Ui(FQRBYDI!aW)JC&P zML4A-v`5j3QbchQDT!SM{ZeG?oR2#;TWEoz-CMb1E>35bFi3$G#uN2Qq`TN0ZE?&J z#w%6ED_zFUSvX%%T%p5QJfb0PR%TeBaMjAfsae943bcq6(E*7xfL*32j+w{Ep6!~* z<|v3aXfPa5xE=+ex)P0(B65(>L)aW$aiKcnks@wXa#i4;*q6uig}{a z6p^`vzMai66_416N0b>p^Kc>+Aqhq6;)#kRQnu!lg76C^8a_p&ETP-7%UnJgacTP4 za{c?I6J#-ioG-}~yWdZp?*fs3#01W)CPl$m0m%)2TcDJ}+vn8T5CO3Ni<0Gg@>Z#* zVdiIbYaQR(EJ2^-Cj|$&O}=jt1TmcnM80`OA)dwwLFy_1C5+56&{4OD zSfJKqz~|eG4k%Ld--F%YL3{|Lwj9blMDC7=+0FR?v)9I&NU*lSVRppCL`FN{@q2#% z%sL_}%v==@^&UcEgWk~FCnHked5>~38+%*%cF+%m z$wpqk+n!)YLSSk+8A9C4%g6}CE7y(!#RKGI_DDTrd6^w@^*r-fdwZw{CoiKQIQPtU z@U!wZAdqbl6kDzx87j5Q%cxMyK~6>?BCz|+V<{wr3YU}FL9qaN84RYpj7mU==$Xe- z|Ht=HZSCK)3)SvDe<4sQQ1D^q_Ynz10Qu!*cJK9rNU$foe=Oxa{h%reg1yhs8AJeF z%gc~*4Kronr|5>i;9s=CnPEix2lPWhrg%le*gm{mc9T0 diff --git a/decode_template_2d.pdf b/decode_template_2d.pdf deleted file mode 100644 index a13365bcfcc2b8d43ab249c73e18f80c5da8554c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16052 zcmd^mby$>3^st1)Qc_9@i?qbs5c>z0dFY{`+>Hd1l^uCr-WRoS8Ypqob%S3==_sc*>^0rHvpM7z%ba zw*yH^f+1(UU9ez?f*H=t!Py!N(J`~ex`Rc526|v=X%N=Q65kO0QwL>dCma}l*Z^Xn 
zZElCPz=6fSJt}$ORP=FXI4l_PtwP5Phr_x#fsyz}5Jca`%+l7$8jSk>*3H>MABzKD z1g2F~20+1j;lL0zM*sx*@43SFTn%jU6F9Kn5x~QZhu#efK=%!Nh#uD6*~85Ozy}Y% z9vGsDwX`*pclH8igaUu07z!+k6a|}rAPN9dKu>ot=37fSCnsm%35IX>Ga>M=e^5>X z>tv0y0mHt}rfll~;0lH)I{*Yw#9BC8V(}omiYFk8)gy z5so%kE#0|>fw24M-f})_kHad7oDsSB3ca`Eoz-)bO=r_hP2_(3`FOkLv+g7|h)+a8 z%LHHMRI>MnPh|UlycK(`dVjkvg=X{Xo6$*gcJjhydZ2VK#<-rJ=f0{#QQ{cuSAp^^V{y}rHTvXacL|#oFY&t5C#ogI zufnH~>%;9x_Tq8nwThxmw z#bx=?1PM+*gz9~8R`biPu_J_&`7|T<{aN06y{&@?h>&^LzS^4a81U+Z?S|>btq^0^XRApDj@DC9^+gsOdx3 zDwj0UTyJxt6g87`L+ZAA`Q1Sq_R@1&^I_*o0u@8no@E?JXPh`+`o-}t*haVE?T;rY_xs)e(Eut(i z*il4R_vp%nRuPx{7sc2KC(xDg`LKnnr8IuvKF_&R<&9%0S3ea>I8%?FBR+a^rx&q( zWtPTv?Ep? z_|K9uG(j?JXQ~%k!_rop4bOh5N&GY;y(Le+`t;S)k5iRzF6Yv09CJyz8-04|wG4g4bb3@90O?lSI|WHWroE3d0y*i2FMaoHx5f zX_wd%vQgDgU~iO4-B=+K8V_m_KAeshzN+w! zJ5Wu-^rc$tdts7D}g(~F?BzM!Y2bR59;Y@7riwzd6~s!nt`)p;sB5 zn-?UV9i}^Xkenr1_Q6g7HRMXWal7-a#Qcue)EqG*3-%_ZoMe;&dnYB)aO$d!V{Oou z>8<2p2}E+%HnlZB(cAuy_8d=*21vgUFy|Iar#gP{@9R97n~;0%jSc3?DX3ZdS^22! zJk`NoW@{IQlqlg;BI)GqM$#`&Y&C!NZzNlHVVk6_>!t#M0Hvt z7BT9c!k+%>3vJj6EhXbnw!I^)8(L5o$WJ4k(`+@!$WsB{+Y3F`!|ySsFvWP#6N)&Z zS$wVi{@Ln?7hmPWhTRNk&G1cV=L*UHHCqZ5K{3>_N<#ywW0oYl($b# zp_03_wEDDiq96CJ9V1d65U9$|Q9yrPyGeVLXf@R9&X`Q^v)h7%Wcmu%N!-Wu>AN2s z)sK}gNEKx|tslm}#BHRYOpAM7{&~!}VQhs{qtwN5(IMN75PI5!P{PimKtho61_z9K zUOu&^Qy07`M>CLVbO*jq=ov%maZ?kDO58P#Q*O$<-d!&q1x&=v=@C}* zhOw30uZgM%tV%6!Yw6oi?PtcZz;(G6A9H6_PJ&LU7Jsy|q3f>4R_2SHeV{O7_IS!# znT;#*#Tw&>UbZwcw*p`bPXe(y4$E%Nit{}Ft1T(HQI8!(ghpJ z*t_&(={i`MK9yMciLBAiWOzQyy#nhg&ZGDDaBW9VM;eC7oDyGI!cwlfTtN8*GvT1^MjM z6;@E&OT7qAmJqX4>6;l>!s@Ihm*RV27Yd68IsK54R^u(^#!p6+V3rRH8H4EkU&07* zw$@+c6=qcC%)>aZ2fn(;o;#FxD&O*vsywsW!ia^L>G>c5mG*46E=Cd)E7|d~dtq!+ zu@^@`JP@N_wllp1-6i-w)6_bU|$OZq&U9v&C$9dnlBQiOSt@e$nx zlGY1nFyk)9&tWFHlIG>3MdEc}q)7w6lf6-OGf+GZYop;Dg777L6x~|In8`q z%v(6KVLqo0D%jjR#^)+b?GSmsnplwQ`&}RKmuthj)9rQWcgTVEiTbe~+vB0rYG%)v zBjt8oGZ>}SQp~cSd!dMk&iHVLJQDZ4`=WeH?h!$YT&i{0@rwAY;WiuN z@b|@jA1=29BweGIxyDr#6U&%v?-!J3_qv~2^fld!u-4>ahmTDoov(bSKJ3f|*gsHp 
z9-T^B?{+@7pR&GbJMJ|Rv@a*Q6{E1IroVN!v3X(g+Cj#8q5JS?;Lg?6FKR7sJ2DTz z?7MF*3$v5;WTG5CNL_yJS3LQ4U(iFQp3*bq!TV;l)JgGC(H52xmO3S_L4Ty zJ0v!WJxxa|pEF;4LWa6QTUn(@cITsse8f?gy4vyFhBw-K$7pVaU+Z|*5Ls$*#bTI# zagD1z1Jr3Q-?cm*`a$vL9a`_X4O3|cXzt5CUz&FjImdUBB}SgEZj_(x;PzH-cW^q% z$}70qM?~ScY3^TB`Jn1Cxoci#YApRi$~@QO<4(-c0(=9TEb%sx_tgw;M^sQ?sECoq zQ94YE+)`1nQ?*?MIR+bNB;-6!IK>}B^J*mUx|~`e9;ziUX;kef-m1#<^5-DFxXMp1 zCRasDFt5&Lo~o%xlR$A07=$zz4ko4FyWPH?N9GkjVM6|;R$*vgvxe;4MR(1Wdn9Gg zE^ds{!9t4~hS?o2M0pHdMdVTxrQWuzi#w)K*-c-S=LNp~tjoqf|57}cI<2{|$sFz; zxrz{Gp)YAMl%bu*}k6_v)Q-Hy0&&fGp#eTYM@LRKVv5*_Eh8^F_n zQ9lV^BB|p(Go~6Yhj@+v4J#n|^x_Qs-YdRby^am)3W;hUps1OoOsHBcDuv1&Z%(4g zo29p}sSpjIr-aSk*W}9yd}~W>kLJC)bzO@P+DCh}mWYT3;eOL&E0XoC&H3k)($LyFSL5NAq}UDQvwF^o}-AV}XyzX6qi@li^u$G7~5HazMG zGKI@SN7mJXY@^~`hMPi#i1&LE>A&*zseUPxtE7COY}CcS7;#M8Dr?BpS$?cJF0{>H z;z>fR`|}7sdxF=uwW@jPLK6wBhdXT+q6P^}{)jxQ<7RlT@U#f!PDOl{ad5Q3z%;~oeselSfZ!-Aod z2AQeV^I^`7mM01wF-t3ukO*i4yqTgDYe%kYLFP=f?gICe1xhteSK{{C$%bd`a`QR1 z+ZAFH9_ky>-dP2yK5~N8;F=nVFV};PW{wi7B$=Wh!%V^ZeP{U#MqX9* zMXs<>154@6Ca7j5!}d>Z9LTwS0ay)o@63>mx-kQ z%G~e3q|dxPuZ%U34UxsBZOMR`*KU*y)GJJ<_i=UyBt}ZgOLHVMrXg9Irur&a%g*(A zTxDsC=y7jWSPi+yUd(XjNSw@^nnLkG3YN%AAztm$_h zqbU3t-~=}vXxj@J&Yqf)5e`ph=kI|ACrE^M+j%lu9vw-Q+kYOu2gMMC*u#ZRL2o7G zj`!WUt|cj{uE(Nx?)aVK!C#XEPj2TB_Cqg(mD2~R1e_}D?NeWYljF?Sd$4rlg=I_$ zqQS&1`F?STK1Tg*VR!LSk1fx^qsivt_vSNis^~o;R=Z|3J<-PzRC1X#^D-${es^)s zSZv5sL%*tzOvbU&ApL{ZUh~x(Qz28zA}`Y5Z3PmkO4J&wr+<2iK z@{9r+!hRRzU9YN8<-0MYaf=gdF4s!j=fb-)L2pEN0CTkf&V0CQZ1L7l8q=2Q8#t7L z)!QtfxlA{wrVL8_@YW_K&{%h2?c(woimI(op=d@-bCofT%T9;X;+OXwKF1C=qCfZT z?Ipcx`|>);w{LHLZaHZEN~N*FBj?KsW)flh67r9zvXczzIG5vteW=it%MT;n9DC>c zwG#!O(6})Uc3UOYH^R12u`+jtOJfn2_+Si|k33vlG?Ux)2wh36?H;-7U6xPYnyo1TUzW~i zO+L=Q|*R;}3($-OGHL&>(U30(($t!P7{IW#OF*dzPI-LbG zRAMlr^P*vYe4PoU?ITFZW---kmvn^q`SPT4OvSK?OUUF=_ZNO4E{x-{vs+#1F3VS< zFjozV*PaAfi-eF?G*D$J-+ac!BF6pbrt?NDm19)^x6p{&g=F*8AZxU@yuNy7h34M% zP1`{e!%wqg@j4^3-U4ca_NU~=U^u4X68ic0sx1o*X4l&Jp@D>Qn>M;2Iltk2|L67H 
z)Ca5-U&>r&L$4Il_RTeH*s7G1hrUPWKN-k8c+zm4Vq++U*n!cO-i*Id_UQn;qTH}d z=f2yZBw3G;&V4#e=wMgiS;5?fV@AtSXW#TGaP_%T=#@*Jj0sO}1V1Ga>-3{H7PolX zv*q5UFJyo;CsosBebUWbs{GzO)Z2fZ@?7T|G?RgIz@2ilsJ{DEw&;#bhwEHnQ9VTA z6oC&DgX-tOulsn0?O`&HJNk$}L^-%aO1276{YR60Uwho4x<_6@QGIG=DEU#stne@! zi0#7+BY0usg;Q^c?_ zPs(6ixm;=PRL;t#;+-F(CN5ey>Wpz4}BX=dJ?nk3vJo1vU&aZGiZT}%v-BIQV1%2gXQkgjFt*$izp(*^dV@$ z!mJ0LOU{~M=2y5L;T6r;bAc-@_i5KfZJ;m9W9r`9wly(T8RSiCnY%8RA733}9!=cd zj&3g%hunl+b{UcYpLsT|W4B^x{`9lgT6E&U#Eu0)*^O&kSJ$K{VB# zGh=czY?(@%ea(tbKTCa-Epo|!R}l0LeQ&z2G}-tfCz#NiPdni5)kI46TI>-_3cLTJ z%UF*xjdiu`N+F+pdBMsEe0YtR0man1g+?q)vc__;t4ZowUNoQ>IAf>VJJ-zr+RM?6 zOvk(GZ2M`uIZ@LYwG-`-x?loFryGLIMnhE=xoCcQDk)yIWkpvV-kVuQ=I=~s%u zG?VhKIt353?BYZiNP`WO#Wtm+b=ksuOqF$2PuvLV@I59c3OQF_h&IR_Uebi}-3X$c zydos-k&9-!fH3y@+~F#_@rrxPDiGe=?lmFLiN(&LbF56UoxXy7s%~YH+h&Es@BH3( z_(6^CcM62SgB+&ctIkV$#1;;x(XNX0V_USox&ub=Ick~Q+tusbQYlSc6 z$dxbTgCNGF*nDX@##^NdQHBkkOX~u}FJpO(d0iQ5EFYP!KF}69@`Aa5WwDT8=D|Zw z&aijplru%0rIzB5EOa~?6ukt>Ag}2ylk?-xUfSx7FE z?>YfNZEnff#n z7hc~B($!)d7wV6atd_sYr8{l_zHpvQ9@LU-P?`pG-DaW?nW5rF;_I@5V9+M zlD{+oO%SSt*h;k$csGxn<*U@b4Ufp~E0j zPS5fz8kCiP_t;|?$6VU9-{+M*m?jeBthj_y(Yqn!YoW(8OOonSq5cTLQYK)~nrNw~ zIrDDF%TmBXZa7jxYmLQH;Jn5qbg*{RQbTa|9`(7?5Jm9>r&JExENPi%I&xbhaoYA| z6Xai76bvKX68z&do9mJa5^N<~i{HJg=Avag*P+VSO(@!mYS2%39KT-E6s*2|aQS+i z=V&DlMfy(v&0WbitRHF~C1GUzgLZbtZnBTC4E4Kxzb4X5`ku zT9|M%CqnqeMTNb+72a)^(Rj$wpkmdz9WlKjbDpUe=ojg z!LK}lL02w3&FSX5>B&9_9T|#oe|6tP+k&kpr64bQuu- z80q^t$oB)K!$TANK?(|v5&Lni=ue=Bx$$W1pia&w*<-|o4js%Wb{^hs@sjKF6 z1V;8?nenS^VHMwI+MU(h{e%0Vx)36wXV17d+m*ScyKZjqU+^1PMGS697%p}XtIsVq zhl2`EcYth3huzM@DYID92vgv14Xvcpj!PWZ(d=^dTto!Ph1(jd48DerO0JMMNHe)w z6dP8UtG=1mYi^m?&OSY$GCnO-r!_gIKAmU1re^GUN!!u(GLr{`*wHv@4v8kxy;Dm< zBI0pQA8!G>H3gh#;p-d`xo3x|D`|QCt+>4gva~ky5ltWEMn5$B%BeWQ^T!u_Zp4=AvEiWZa4EgqvNQl{h^paY^Ayt)5I~?ww5*%#4U05{I?S z_Q_4B7`ho=L2HN;O!1H%P2A9%V{y0`$nZhEj4v|>u|hkrbtTc>&x2mOvU>6CLr$ZI z<@78!svNxJ#`?Kw?^8wcCA)L#?%iQ66&Ep0;F*7aeJUr(%FjnnjTRc@SJrPcM2H$m 
zcwl{^C;be&Qp^QJYL$c=5NYT+rWdlU#l^L#XM5KgQ&!B01J9&k1JX^@?hV6B$DVa)j9)lz zE%M^1g>6O`rr#;bguXi3qr6z(Kn0ph8H}s#Oa!~1EqLD_Bf7MLj^oRU-2C`L$P?pF z{e}V+&B?gDd)g-u6_mH;y-CR8CU;;mz00ojV1)n1832l#WDF0|fDrT{f4x%g-&fl6rCML%SI@kAg6ajXMhwC{oSn=PPSF z^tPuVdJj$-W%_&2B~3FeGts43Ut@l0d9X+NHO%PLA1H`Fr~ltjP*Yd!1`(7JP`00B zIWHwN6)U7EEFVk^P={cM$t&X%l2Z*RXkft{LQ=bNobF=#tp0hE4-9?c`zH@lmrv>I z?&{qZF_$+9XC2O^-IB_+r!%Z>ev1*xPjhb}9evGmN%Fx)>3|o>3TT$4dGoTp+pplUexdr(K=N>m67R039D{o zpkU%SIWF&f*PWr+drRYd<`*y851$>ej)Z5zc-`PD`N%QcFamF7gh!1V}g@R(D|1N@X(K;QdP8EWH zF7!B{daXzfRc}j4M1#D@302Cj0fKv!*K2q(0>iDZr-B`ib%T#9R;NWUE1t|Ch#Q<$ zCpyCO;&}fb;K4^``~r_C68-N0DaD;}BL{kPoKINDf@;ppy#3f`$f97g$2*$08?jgTGh#8eQ}&j zCj&h*jSuF>_nHq}LnVgqGgq)}&R<2#VwA7K4coC*NOzs{$0Q-Y3&lTyJ~E0=4Ri%5FdN>l)~<81NFL9cDp z037syK5*=LV8lvbSS`==(amyVYl|ugiR3B}+b~k9l4yCBp#{|M)wu@IQHou>T<9QaTOM#4yT1lMohz zHlOtp>*JAUiVohZ?tMJ<__&hZX=GZw0K_Aa$54Mp+%LI{+B@`>lO>Ahv=Sttif3m8 z^!94R62s2))8u;!h!h))KLz~s9oCbhXHe-br|BDWraIxDs2p)b=fl{mFM>x3Oebo= zZoM>2lF>f9vJLzyHi!_$MVHFIk(JT$|#{G zqGgLuZ{jjBs1~84wMEKl4!0QzUw=?{=__juS0knD$A~W1biJ6|R(qGiVaO{!{bzq5 z(H|T_^na0ts0UPSP%=f|B2!|2-jC9{i5)YoRL6Ln>iMRnj8BrM2{^$!6-crug+fhS z1KuY?DU4NgVlvnA&7QWb5=Vb1(tEc_lbE$uHXr(ifKZ;r>JOm(!9#>0q5m$aQrZ~m zPAKJzeULx+x`#pSOv0&^n_sV$ujHI0rp@u{yPI!ulo}SG*2@-=Qp52D5iKhel+NWN z-whRVL8%3V$(>ibAbl}IgY*8;ORBr$43eyxmVsHl;V0`8`d5fH^)6O>tPrdpJI8Hu zkJ6&LywOF}LjHT`?)1TJr8wg}fJu&9?a=5k|8=+i&2 z?+-rLe?bq9ssZdB6agiTR}Pg;j*F1L!FfXUNY~J-K=P&Qh9t5qd1%rK-Y~%+L8>Ej zHB_m8nEDUS3b2CtcgYdfJLA?3rhMTJGRcAOk}gWV#x;61AJoPtC@E8$zK6XEjZpV9 z1lQ${chyA?$%f2TmJWpstG?(H4STL4zp_}=+8Dp;*p=87tr5x|v;aN;0RQe~1?}ADhAMcACg+HN67o#(K`RIBN)bE3yu* zX7#2;wK}cC+XQ{@E2X~GeQ&;Dx4f|uvdwDiGb1CIimc{jLmS*J`lf>5;T|n*t(FV$ zH=Gzo@)C2S3+=WHY3AfF=q~H)>ZkiKfBn3@@m9kBD}j)^pZg!!@&^y-zi8t_iMZa$ zpiqH0a&n1C?++vY(UK9n$CMQK$=^Ce1P zYNdM)#%x_TfgL-xW>P7YCf8qP+{HtGLgT3!mo>Fany5&PyObDsS=k`P?55Ls7C2oD zB1DxlvN5pb;Q5DM3C^v@Bg^*ZW=(*_Iur+sK$IHW-X#0^hgR zQu{@Mz`1A$=WZT^Iru&W&FRmoxMzw~){NZros~jccQP0q-FfII4K?!b$ql|QUv~~- zZXlEl58|z^j`YYk 
z^4MJWEz`H1DsrXoI*B~GDzKW7-*l`YyTtk#B7PciFW$YLs?~Q`f8wCHv}thfX!8}3 zufLNW@kbx!KkZ^+r~!X$Z3o!J;*NrNY3K1j{M!03>hXE@cH_{RM9#Ery@cDGuk@vT z(nK;ErzVE%5X;Fm!tU4WYnveD@of=S;X&=Tsr3Zuk^~A9k8kTLoM0arE7a$&nj)6I zQRhT6ZE-&!rIvF!0&LFDEF|}^M2Y^m<1k+VvK8=5W>VIt`}7NYl9>WX2+pI~CRQf4 zlBBeGALAHB$4ZfdOyu-RrzH1cik{NHF+w~Mr5o#^EwdwPlGul89XUepI!I`lt)6&q zvCFyo{XLQkpS}`gtDH^#oj!nq_}ff=^Q{lV%pvk-?%3}oOq3sS>D(wS3<&WDqXFZ17K{PfnFEpS zz?ha`2o@*-a%c-=fEobSE>1vyFboFN0YC%0uMjUV#2eU;1wz>&7i=wYHbC{a`2D}H z2ON_3$13x0_Wi_wLx%hh)CL&oU}o(O;QOEGF6VB6kMYNd0`THzz&{9!!T~}kn7OE8 z56J=rK@5Hrfg1$C-3D6=IVWoeEM8LdaahOm_=2XH*C9q=7#tz`^YwxsChQ^@1(50g zK?416$AAN01_F>Q1PTg9!_iQOLJ0KsqbB7k|3z-kW~Qd96$dd)9?fWU;qdU!q15W`WzDF2XOfOP=4?;A@C1S zJc;mN<59!sKsw|Le!6c7galAL?1`uE-vt1`Z$pRz7~|jIp#mWPAqGF#^&I(!Dkz{|#ugvT5OMFKnoV8zS89})%)@B}Xx zfX4bc{g8$;$s8<-U(*65OEAF3!x9!O`u#@Kp*jY*3RHsttt&-a^t*|20!a3)lQZ~m zYXcYvuh0P=91iuj9RMhGILc4v;BdJKbog!(09+3H0J`;X#vk?skm`oE-WtKi`4+c}c4bh}gGoe_Ml}mxOY#pAFq#*TWzRa=;7_3o|eb zuXiDGqW@zv!1epy>?aNV)6(znd4Fhaz_R{b`SY4V6aS@t!%P!^`#W(oMZtjV9Ad5v z6aL%35O%b6@&K;F{fge<5I+Rqo3Z=1eLyr=7*MOg|L@H|7##NRyMK;1<*0+fluA2N zN{}tJGfLPuPweZ{H0Jl~h+JhgrV%48lK#41VGm66V+f$>;RLqMPKx-usQilJ zqEJyJR7?~N1Kbi(sFA==3c||`YXyRW5uzZ#6o3B#o(vMum{#Dw>hLZv{s(sYUI#;9 z@Rs18b%5gn(CF7XycYgv9RiA1)W6mt|IiNbu>RQ&ctZVN2gh5Ae~br3q4CD&pLI}C zD4@;$S%*UZ4l@D`$%Yi&>VfTKYNeYl()j7+hGI!dh=pz22PgkxwO z5GFb{PGpiNoUnAM;^V1i;A!nihGUlqy4IeaWGV%YgDznR0|#qcM~Xch|LZQ*jbuRf zgqwq9l~n;NWFJpBLcA>ep(jjxIn}I6~D0C_tG^a2usSi@EyHr4)vHA(qpPjWf?Z5B$s>tE}bq z_xJJ}|8h}n_;6Ev=)Bc{AdjilR_GN2@N9>*}UADNk&D5`hcFj&fwrHg<~kT-D&~Z@xol{+T!R8D%X~b8KAr z{S(XTGm#IJCi*{D=wEC|4|#33(c4;GbYr|)=G?$X*~R(kK>PaF7K8i6)$cod$<~@o z_%3*#m^yXh+rv!#BbLc!eDf1OYCq0TCzX{g*Fa^ zVd}Ze)chk(ljGiIAr=v{E+M=%!*4R8do4ac=W)_v8J}yvbV5L;)aUffKy*`M*J+=x zj}8y_m1XOgR;IE%{!o>|nz1+AF1qF-&x_Ar-()n*cl6#%cspffHFn+ql!tdwX`$5o zP#*$enb~cX^z&P#*S11)J&&~b%b0DK-y_o6T#t5)u}i8S(m!2a*2||encu6(isMeH zmV9!EwNc8o&*?ay;iUL?k6NF+t(++&PVbk4sBkx9WyOyq)~+@~x7OXKRWFK|Y~gX` 
zCTbi=QG3x?8+Y>AlS|vC_oznCMjV=wzhmo=PQvFOlu&tmBiPmNv_B(3?%~e9Zr`yt zA2W_4+8Xkbb1Dw}tmNyNpWb?o?AX|57AuJ<;c(m<7M=Srew@6N|F z-f1W$a5H4beM#vr_M5kT_`|}M}oex~|tS99^U7W#l{I#NT_vGIRfvkP+%olEV5B z(r|jzxE{_KhYhIkNsbeF#x%|y)_R+DimBFWaXboFg=N2I_8mEe#VJeXouRRUer0$?Wd@v z^6yG(8V$Cl>gI+G$C5auxAL^cKH)Hq)a2HXX*oyoPEX|%&sgse&G}~V{N?KxI5qDv z9b@W7eGl%sc4FU6!3|u!Z(BGyoJbj8rKf`5cV@(R8M{=UzZgfAcbPKbzpb0~J~Jd` z%E?wWQUW*5Z(d;2X)DO?QT5rKldb)nfj4TlW28V;(c!D??hJ2n6E8}qU`q{4ahsG= zwGqj*vL=&{V;A1p7Tqi0-go6oOWjGFCp9j(@cc`QtoJM?C$uEY9vTF+7adJ9-j37y zblW(x&pXrVY~kbQ2Oio~;pEM@`y_pGj#d{6l@6)N?Cg5#Qn+UyK9p&W&+y1I3FfmV zsM$Hm-O_urlf>z9ef0sC6_f>gKOHr$JeRj$xN(1#t_E)~cY;>`coi3Su<+=X%tCUT zL5&SICHNpJB|)D)yI}hR_NLv!fswZ;)-26@qXem|!8vv;BX%zB6Gah~wu#o<7879t zdzlX6G<)sR*5A(d%MMTFJg8bY1hbks9vONtvyA<=9p${=-69$Dr0lZrAKDxn&wgT_ z!@bzGM=Sx$Gm z#oeMAZ8>YflXt}gZ$@8zV`&q~^WFCHHLg50rPFDscb*4d*&MiAx?AX>>8Pd}|J%0_ zj0fzaZmRQn%f?ut1(lAT;rXek$hqztGC2I_E%uh(Y^3WS5W$ozc3IKy&XHUl_X!dH zEt@xCjiq~%C0IwwqIniGuWpgqxGm{xa!b$f?fDS@oY0Fy_ckuPb3UYDR&zx| zy*}O_-CjSojYln-mh^Rp9fz9!ySm2Rszra03>50jBo>jZoLaf`jQF{-1i?S z)o?yOYCg^~uUcXNRv8o(KGr6IY_WHiWoru6N`575Sj6dXBd0!k&ESCTcX-C5mlJ{C zE?GXLk$KV)hx`o zC7r6HW5fZjuU$Y)rzg=BVfd;=`E$wmB`DSWad-Yy{ zFLJ++`m4O}kLgbC>aupLk{-$W#x-)@GxW*ku^dA0jv3XG%C2+%uI9BfZJ8w4Ij_sJ&;W_t2%z|p@zVHeUS=- z>do9IizRv@O25lbdcsQ@pT$}q{{r_94BJg!o8VaK?P;=ZeR zr7M>c+7I3d9)7G=I!^Mu4+je#(y_{n~QZ|gp~=iO+_dBbWr6hHMoeJXaI zC)~Tmx#{MITO?kQ)`ZXdP0RP-kGCr62t-?%w_Y8sZS*x3deW-*>`&d>d!CQzc(Mh?%2C{(&AdtwDe8C(QVqP zIwm%=C_(k;lZH1sI4rK+au_IZkm@c>db}OgBv$>*fpyL@{bfF#4BGErhh2??6_)$N z*SjPr8Q&LZ){vd-dE?5r<4OMXL(#6`d7X35krg8hr+RiV>RwQ?YdYexcfM6(BD^`I zr0eWL^Kg5}&mJvO(d4kXw{D7yRB37T8S{wKagAKFuQuEpTEB62IxTxcW4>Q@s32WY zcNvd^(CMGNT=#z#2sL*uI7faT`Ivl0>fmMwJpV#}zhE>CDnjrPjl&X_u{8~2LPT=_ zM3*~4fO-|qL}PFOT>b~h#OY~J=upg$2e=pH({e<|67PM!`E&8%Sv>?j{oA*@$DXS0 zmaDojy65nz`p?*guhORd)lCO``k#csQukKC9GRM^#u&DwZHem>Fyp3na)~_BJi6Ld z$GrQo!3trHN7NdIkj*lKEJbpB$4GZfvu)IeJN2KGwoN4OtygR96n}K+UCY7FTlOOw 
zN4za{Tpg|WytWV+W7q|yOPJ<%eiE15A48eBDi(SvRY*!AKrsBKVH5lP%_4qwyM5}D z4vnQ?OJ)?Br=OfuP@{<0?tP@Ix;H5kTN8Qn`*{bE^s&Xjr0FN63v^1_hDB>AcjY1F z7d8g~|36iR!4Zs*x;goDoVU+@f_+krhl!m%A}<$qsezHJVO`#Ht>*#aN}9Iu;kiN= z$TrqztI+t|fcl*#8|1gTXtO_`+Mdmpjitl!@-wc%(;DXm0`V_+BH;9wc(S`K@-Lpq z4vy@09|KH9YBtEnr>Bc?vhHMUX0?9We>6WgoY=y^z`C2M0MBG+&VG2q;jMJ7aogGc zWD5-)YgU;T21)pDzGP!HDfB2drCGT%lAFrEze}1--BWzI%fDSzc<9{rcRVUi@x@xE z1_Bqb7eoAqzPjgLpOAKGva|bOb}J$LW8Afs&Uu1X^&CECkQPDP%D$>E68m>ytks(I1?&EL`+6P72Cc zDjN+BGO2~{Bqi_;^eM0m9jtIL3%hh&gL-)3XOm>6aU&#q4s1J@q9i zyrDt@&nE7E{h0FBS+VI`&OYHO^|xm+RMAm~noljydM_+=ooCRD&UO%*{BWPrDtWZT zV84ou?ICKWV}#M6S=}woMU_w3Hch*zD2aK*y!mWZ*~sb~@51AA@H?YGN~ee>yYajB zyNAZIWZu1;6*Oz}6kJ31E6=XWW{UZ5)nf{Az5oZ*Jq9dcBvS-k_j+D2UTF*{(ul8JzU<59JaS^Q6XJg!3Gy5n>QM`-ZmzqqN#_-?HnObgoTcOo@n8#kSosr~t zt%_JjiL~I#J@1uu$G}7ld6O-~v!F5#?qQfZRTqW+G$<7#niMfM(=P5!^kW}p#h(}A z9{9G`HwYhmYvjb(`mI!jMT^dF0xFLOh2h&+cv|&rMz4Jgv3la|De!O)r-$Md1xYTU z7ejk*Z@QwJnU``yU(Y>V?#^Ilvj0b`BNA&E81yP?wZ5=_VeYcJ(oxDnY{SGeh_AaR zcDW0vW=PnLb!XFG5!2Q3E5Pi2xj9gc4Hb94PdMvFc3@eA;y9&uIJg3yp`;IQ^yLWi z9J!5;;o86B8s>mCffQA)g4%Fx$n&-=lL&FX0v6)2jXYwkE*gGsXVhm*lvvW?W+QZ2$2}Qo zu>HK)@d;9Nz_B+4ShEv=_JZUerG$KHGFNiM#m0$rko^Uwx0L7mvlg5Zd0bs2eCw z?xezm70h2AAKBc^u_HTZUyyZ+@xJAfVl*_joesl}q?U&Xo9`%n)kPP7+G0@U{JEd|JD ze>{8R{j>{Jr|!=4X+zW#cbV!$cTK69_44PQ^z!+B6@L2(aX35d%2yVzVp~K>Mbi6E z1Bw@!k7v=bTiRA;Wu)AV|1u;KG-IzqRgG+kklHU4YxbHL)cg8|fLY%WLT<0?uH2%- z-J^m%*APc$hIutdLaNr-owb18UxY0T`Vkb>vFkX`@&KS`w}A#eOVh)P6eW4p*{bT- z<*^0i3ttNgvk$locimJC=TXj04Chzgacx~p0~FMS+j;8#&-?$Y18=LlXcYV3lPrW_bIH%-3Z8cqH(@DG<9#~hugZag0gN`}8 z^sVCJx`ou*j?6Z{eIrKJj;5LoERU`~wv%V+Fd)9qkUby>+ds30tSfQT(pQw_&mjl{ zFB?zVcNJAPsw*6!xb)3I0)<4Q;1XD%5EKD;7!(?Vfj&Mge@MV1|J&(kU(keABH0)r z1b#FpGdgRM;ULeKpN~$*nlm%lO>x#yuSg|Ie#cJg%}5$(Hh%~m?@zAYxiRH@lu-T# zix-#0M74x5Y*$#SJ{y_8@j7ci5+N>Fwe2N?jry#awZ8o+7dBWTg9-gW&`oZm0SngM z_iy`Y9ZQR1%$3+5!y&2{%WS59)VkksfVzF&dRKVTfu?OIp3P|=KY4l`eT#I<8d_Qd z(4qcDP?ZZrP7o~^Je~xJsvilO&1`u1^!s8uQ>)soq4n79#XQ>ezFQpRhH_PJ7Qb{J 
z6h9cN{FEnlhwdFxnDicXEp<|?dVxUjt}7;a7MBwZ_vY#M1^Gx}?`}rT? zLmLc#=zKmB_f0KJBYc`$La}~Ryzg&5)Up>o{x1~wqzFO1fM-ngwJuX?3LOM`2bE(JhEL}zPMs}}m620J)dh^;!(DGN ziVO&(2|usevOlMGeTm(bH%5a;D+EHtA`i3X-4$?|Fz33Ix+ANP{~fZc0cl`!vnPkK z<&zCLX!K(r?tVqs7rl!XmF}bQFOD<6a#+~LQ~!a)9UGu_Czo@hqf~iVd6K16wdEui zk-a>Obp47r8_7ZBj&D?W`OBodH0zpL0ki2z4#S~Sp=)f!8VC<1_22t|!RPBD11h&D zfz$ksPxSh0%)BP@`5W&Rt;x7f?N(HIt>C~h^w1fP#V-NZDO@DkGlqdhK{sV3 zo$0T03kvZUj6X_`k5Vy}GxXIr=BWD+npx8z)Nw=nYmmm?vqHKNm$$}9kSUJ>+VLA7 zJh)_#7L^l`f{dQYci|SWx@Er8EA~Ke)m}tLU-6D-X|0dDZbg*(r>;Y>tRc-Bum_F% z3qT>Er$MDxV1C@RZz{y@PB$rGUugeg8*@F=hK&*Oo@Tu_B9r0bhf6-jWH?lIV^Bfe zjG=z}+FVWgccrZzGta7JdU zp2t(S%%>ruJMrY8p?^tLK&kLExlPjlbN7j+XAhDWQfwo&BqZ+g2+!usYT?+*;j{h+rxY96+R4%Yx> zD8gT0N9_w5>`EZs&0eeEy&HllACM`#SL`rWmZikx75Jkc?k*$q^4M-kPus!BsC`$r z_m$_tNxiS67V8S-s!$4@wCWild^;=ITjk*+%+)#T4hQ@!t_{?zn6qr|PuMF=;tHJz{Jc{iFMV^< zL&Fj3y>oe;SNFMdPyKkMH2Q_{;Vzl(HN;*E?fk_FKsHjEJpf6QTFt?4Hua260Sq=$hC1wsSiaGO2+;tDU)Rk(Oun?zG_WtI`!x)Q#0J~K$GiRTO4u+CU& zd)r{!*GsZGBCkBT(JdLc20j)pm7OwkQMcc44Vz(Kqd8k@HfK7~mq`Ca9-^b;I@Yk> zHu+#&UVoKa?o{4}!`~OyC94_6|EV9Kir>e!CAdP%&ove+$ z46G?0kkh1kcq%zqQ^8<_mi4bT8i#`+pfSp14-(bU-P4Tw%U&gy1Fk|I>mpHvn_GAdv(lYtYh4U(^(3>R>cWV+zSOF2jmrQko&4oE9nrUC59H8F-RijiH9NwGT}qAP^S4GP7qHE}Fyf zNaX*I0{efR2LnI@7FZk>pj9c16daGi!-&8-2vSHmnt+32h)7z&0Y0JoP=5>_wNzsW zQ2+m{z?Sc0Kwqdpb1WAe1_i7GQh*c|6fhA{3QhpFhX)pm1B(-&0!$Q5#KAy;<8gR6 z4hJRyXA1(bWr7r7014Vq4c!MHkMJOEiU7t#a}uS9pkAUuHBA@{0S46&6Od3)!2^SX z#^QkBOLKs}U|m`pg9VC^q7@wYssXi$Ao$?{9gji-0hiXrf^~7=;DaGT4ve8GWtlmsrr8g)5*E5ZLMnpThD3vEP-y!C zEw@x)I3OZzB&6Tp4Zz^i6lfqZbO+)JaK3B?%iFbFgDrzCGeYAEOF%(l)7XdX;9sml zGSQ3)7!)+>pIIQLkVqKq-meLW;O+Zzfec~E1|U25ccJMT*ac)0D+;iI|fRmwbbD#2$=wyHJtX13$#``Xt2lN z?mtd@$w>n*OB+IhgPo_fY~i%G>R>P#4y=GivxB2);pJCQK-1QS0bh>(wYDSB_fl6E zIPL8V7)}A!vqYl;>t1R>p#%s@n{+Ak05QQ83^;vY@H9FUe}G4zwY}hAi)bxh@C5y8 zL4k?}cvj{nF1t=B?5P6#TjKn8s91SGRY0wbj{l@>B9s)s3J8)l90fULgaZ11?FJ4C zzq+k(-zzZ2?{!z~+Hc~@!-pzL;y31yaCd~lG9>Zu-haMoS;|vIXgblQAkOQINZV0)7@h~0^$14 
zJxMpJy%-Du@P(}x2_$!}9OiCo2e*NOKgD)=3ec6dfTJ5l8Onm)qr4xDMB|VIv=lfM zNTHEtVk?C5p_1)jNH`V^1D@#D4>)ohum?N%KQsu{KtDL;7Y&6ZVu7Al(Fl-vuBPFD z<*cGffgte*4LI&UXaKeRn+C4%tJg&$F_5i8zkm2cqKO1xivOk|(E#hLq5=N|_VwTW zh!_y;R@1=Cveh&x!XNXYkXY#X`1gEhA^>HpXn=)a1F zML|vi`u)}c7LETy4lE8h_|^TSKp#lzE@>PdSHQ79Bn3MMS9 IqN57?A4IPXhyVZp diff --git a/tuning_curves_2d.pdf b/tuning_curves_2d.pdf deleted file mode 100644 index b9f255ffa57c23fa20e7ec170de056be897b5ff5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19982 zcmcJ%1zc3k8$U|NiiC)Q)UJqt#16E0^S2Ix${%a+e5?9!_tEQ7vCw+v-I#FxH-Wkpr0_Lp`E3*y^}2*_v5ac zGv1Kk0Y3wpRaFP15WGF$NG(S|g7S~6%8#oS{Om8}pnvZGL^nje8v#(aLq5`g;O^|{ zh6nwD=r@2Pj}xrzEtQ?UL5nExCn1T0V{s_>Sr}3UbP7D_4wv4kspRD341S@ZYQHi9 zy8V-LIs_+M4?8&eM{DZ#4xq1aq`CuOfGPp+Y)ybjcK2{2SUSSIGi$SylIAKr!&bku z)#~gkRAWA*DOM7B!-Q>N@7>I>_FY#G3LDUO-x7>Y3gBh7W?FdU&g43}BsGyJQ9a$f z#+V`1X+s&;{p8zLUBLHG8v&bN7l#!5-uiiM^$>irM6T9jv7@vN=t^p--;Ye3q z``f8ekL#Oa!^+hsDxJ56!zfTXmRVMU^PBS-0SjB+8#SL-D;3^8S@T=%9`&e8zWF+$ ztbF-v_rli)ON(PM3jymZOm%*n^DPz%A9xkER%Yj8>V}`pJegUc6gBG@_nWy*IQ^{2 zwA`#Q)%NzpNc^}=D51h^3Hf1DYwLh=CW3;ZD`QtA`^HA%OBozaA3dbmTAhxD5o#~bt* z>sMNm!^_`oxzsq!J5&sde&l&s(t*7fRbtJj8@Z6kM}7YID>L)P4X%1j$3(oVc{`bG zg3krp(yO_nsZ+bPrv;LeJhe0)Gf%Pk5;scyHNL;5<7pdXZ|7X|)7%_hu&+th4&3~F zw#>`xovp^5=H=}5J+btoJesCn1{Pm(nI?+EZarRjW2ta_yokqE$a&78WRA{AOZ_r) zU!jGgp=ixl{TUXS_qQIuRz=k8(LIJbIGJ-kPg0G!>c& zY#C-=m0jJ_jCwtrZ@EP*ilMk}E0|rP@mb$`z}oyHJd5WNnv_k^}ijxNs4KRWk{ zkbUL`qj$${*MJWLs0U}Bt_(?7*M7>fs%xf|bex|PY$Lju)cctkJipi~T63hPN(UR& zL!?L8gs|*C(w=um01+CvTcy=AG3X&5cPPa(%O#l>#eplk4T8y7{Ijfb=2~fllmhAM zx=?yf>LGjKQlaMj?5#4*6-Gs_4l8_hBbk)AGn3N%Gl@hKMAz!lvGy{*TRo|{gU;6j z@R`kHEY)SjSa*TSo9D7VjeFHoF$fv#^D|RVh1=x^Cm%dlvFoat@~bO!C6_4hNwe<~ zOXj5#HnCjFh3?w0%2#Am>oF1F86|7(FfrdCBhA3qi9m$vji6;by!8Ne8!8 z&RZCr(K*$|pf)tK6O%_WTot#?LU8V)&JbJdmzj-(B$Jd{3zWggI@mfEK zg@yKnMu=LfJRkR$NY0@RoT6h`N2$hZ@F)vyp~hVfu>tfqi|*_PSqUX`9|lf!TDKE( z5*!-oztq;+*lZ76-D42+nR(r}Bn>ujm4zhsCx>3QK8z*X-K;8SrOvm#){bpRdwC-pF*74n 
zAzF6-B)hTq+1?Wg+VqXAuTN7f_CFVx_Ss(kAfLUrkC36Wt;;$eZ%!-!%AdU=wSa+F zV#2I3&_dBk%1e{z{TpxAOgtZhph;`hKIF?{b)|?kXZrT%bvG=4s1!=Ixft z-Te!z<5Qjw7hl~Lde|_I38^F@@=$e*{hQJ*0{6Ex-uK(z23;Te{@hOxUhfS)9m(q#e4R)nvM1{wn0!&6f-&h1%3B< zPYL7Y4yG^LQX{iv$M1X`m+MooI&k~T#4I;dtG+h;l0uVKw_K{{JG_77_PGDYYT~l; zqtF>W=W$|gW@mi69aJx|lHYp6%~wcTpGIafyei<+1)-z|@Q4X>8s$RUyY1DXeCCmh zefu>kM%IjP*}iR;p~D<~e_Q>FQN7a{9v`H$;P`==qSN)`(*X1jt(OX)p3p%;EW7sc zK#IM2MNw9}ZS^tZ5#jJPcB}mlxX0Qb+EGL5D`2y2D0+Y`C8w1x>gGI8%!4augJ=)L zMtR>E%vbjc+Xul}E`@0#)xsp!zwa0bxp1}hHrr& zSCIsH+K$pu!;GX_Jj#M9_{0=RQ+QBuh=rX5@Z63DHS#k`(0s}$xtnk{#QYfZ!TN2F z{A(WR8fkYZdE8Zo7h=-eg9@zMPd`kmXusu8AAjQH<6^!m$!EzCwn@+ZO*Vb4Y9FMX zbtR9EO^|x|f@YBT|Df?Kc4%r*vN^)%*q01~nOU}0V{th)EQfd0n#AD9$MJ$pli4Ub z^cNj3mA1apImg<$=v_jZpCdWB9dhRAc9B4tD52-28kP)K47G?jASfHGe2f`hU&yyR znOh0LPscpg7j*dQkTimtjxqO?D@_S8NNleISN&GDB~6D&nd;e>EW(Xz{t*|Z4Tq27 z^DR%$Y_8qS>n-8BSmk}e)>dytH8|7l@FmOTcCFQX^OX;jt!TNSI7W=nFp-w|oFcBS z3oSIPyOSk?EaOgA_wQ0hG-6nXb~<~kf9Xg=-uX+H+qdY6om<%~hp%WEL}7~1!WE0- z8kv(zCjQd%91?LCd@pylpOH#?U9oKI7P5NukJa^A~02eK(V^0nq|%yS?SGb#vjx%4`O3UToJLb`)u zCuXjI4;o?KQJK{ZCK1M=c*5&Zch2znV{`BFA8^y(nv0@KdwY1p(o@3X;zr}pLfuJr z1BFwKt~@ErU$_;V=jkj~8h3p~D>QCT?7yw~l9AKMI<)Z$O#QV}#P0Ku=BDO5U)exI zqBEGKe8@EfqEqh~-C*ZqC2Dn5uE(SMZ_~Xcaz5pAioCeoBy^;dY_Demq*2g%osn%1 z=}-fM|4=tie}uw*AjMZI^vDR_SfYM0aS)Zm@h~BVzWMyCc!fY?-aRVI~w2s&{AyG zQt;k+bYKA`)a$;B^QQgSM7R#3CR!(*vxjG z&H8fx_O~5Gof_cfEFBlAp`gq!i~>lp9B*3ZClL*p9YhW>+u3YB9l#|o2`+sN`v7o# zBLSDgV1>F@Q^f0YIvpx+a1tfLiX`NMbTpBQ9FiQlOSjwd00NTBy>DJIUmP%Ml_OK* zJ_P9cT-9wrRIE)(e6Pp!d{6y)iAZ9F*P)&gVbr;=8GvVn0&5UPvy&P zBQjpEQ2oeq+R0L=01=*4iU8SzlqBP`VRu=^IM2_%&mCBSD@8hN-8Oua*Dlw%V*qb` z%SNuaZ|ZONBQMm}%0F!Q=C+stnolkfK6nn32{nKF@8s7)S;-qvYLr`k&bz zFRE~;^C66qJzlDR(Iknbgj&z3-yO=`z;lT1O6PX=7|m;upN$V}rqAiMJ&a6P{GckC zcUiu3v=>qm!Bp{8FP_n~5sMP`P(^;8HA!mAzO$3nqahe)G%=D&Z5%qar`} zU8YCzQ97$Z;5%ktHe2{C`?SHc=Hh`-Y)4>vSjru38LKdp?^CL@4HB8|{fl1(i%+_h6mLW$rDB!*dws~q(YRN;c0mqUwBLVNv>OIr?GL1`tFYr0bjG_*zh5vp)^$d9 
z&#?hk7O7=z0M&(uzaj66JM=}jmwa+x+weSnn=cW0TDCq_{!1_<_5o|f;?P^A0W9Sc`7$^lpFn&0C(( z@a3rPP{!+h@7xQ7+t)p_HpY9WAdkF6_)Z|?V+&uLYtf6J&TM|sewOOFXbr8FVbtoSqsbb}o^eeHD}rflO~)?} z!>34QlFN>nRG`Z>b2?K`tf_ZCGNshsjQ9Q#AE;@*vq8j1h)+y2MZFvcH%s!@UrkIj zNh?$>_I3fdcAm=7<`-E61nB{u2`qEQfT~ z4UPF3Mo!WrYqWKimGOF)v>zJnhY_9+{`6=En%*vQs0k=(2q|-zgFFH zYI22X;ieH$rOVBqoR>0)0c?>Kk3Wpro@^2nAxQI z16g~Ni37rsWhvTmx6IyjU-wXV$hzELEgY}o?x&@!Fxs9&hrb?v|Ih~Sz^jyj!tRJl zWS#Wt%AjmaRpFSnzSG^S&5lAHnJ%1mHAiH;>o)7DW*&{E=*h(FX~UiKZSt!R&Fsf( z%b6Jw`f%Y|1uL{k(NkTg4Zak#J)w_0{$PbV>DmjL7qnn6D{Yt9A^UjY%^TB!xAsr3N* zJ9+1drLX@AV}VwReQXv%k?*KOU-!fQ@_HdjO6#`|FBl$h4Y98|a><(;mtx@59AQ*) z*MW9OuxP!DIG0d6bO=`-Ey?V7ID*tLn=^b!hY@(X{c}e~6f4rXdlYhu2Tnc?a^NG& zxma(NG;Y3{Gr-l*Q`>3frtoCIcXl0@B=Sl`rae&oa{t3o-mNi62?V#1Q zWCHIg1V8<01epO@6#Mh8Zy#HMnHT?WuboQM@+n^KCK+&t+mL>PL%L* zy$5bk=WULV7!WZ?^*M|fR6@L=<8;08Rj19@K=}wWk8^=`wt?#%`YO%;cKjP9V!7-d?!DO;*tSMCpUG2_@6xRpo1wTJQInI@v~3 z@!=f0yLFo?TT29D4jn*S)g0X?uhYJjvVY@Nm+iG?>F_Vb(?JzpxHBuEY1Vk-$VERt zEr#5!JwxKhht#!)cpIeTanHveWF1eHU!!&Dh@JPd=rA0~osGC9-Kl-n#z^;#n|FMZ z++z^6EfJ6-C#FM6&kY!U^Pj22ZJNByuUt3DvneK=@Sxx8`mJ@=UTg1Di6Y}TboJxQo-+O>;VKJm6f4EahkR2LGtaowITfQL-t9Yy z3h??e*>vb|lj&n~%3@u9vT>QpKE>~GeT)q5#jMTA`-)tRk$w#JBB^degQ$;vr7fwE@%$BVv zlK$0~Qb{{eO#htD*X)yId^nx%md#k#tQl(MVJC5CRr}25R8CY`IrjOvB<%A`#^ka6 zD0$xfU!6y3y5S-7ore}osZmx9OW&oQ)pOe# zoif|f(z3g^-_)qc=bE+r{lvD4bJexOxZ^X|Gj3{amcouvwJ&|13ueZam-6vUWNN;k z7n(hZ08zLrZX_^7R!&hz+IR#Vuq@5GljWmSk*-K8f0O?hKE$wxgq`OV%o7|BQ|#b3 zTVb+eoa1y>QU8aQR0-#et4bk9WZe|426!CmHg>;6J&=$~Wv?%*J$coLou?1GAIMjV zefAF|Ke51nUd_l$H8N9ENCq)6JAr^A;&cGYhP1eM8E+4;Aa)?SE64$>oD-moFTI~h zIbaj2xEY`AjBC`;B(o)2=tGV2AB4}3pT zsaVB9FZeXj!T+imd^nbRFBD>E6B8OfMl%OR5Yo(Z2DwH+z_)l{)#5AX?Y)&uz3tUv zx&8vCJnZqvBf!?o5Pg*+)85)#(BCzRes<2QijU!(SKb_OKT?VP(E~}0cR+-S?<)Sz zTShwIXf%dIV_rMiQjwhYTO8*r@jJls&$Z&oF|-K)&18 zzIla|`P8S%wP$KVTjIV0Kf8yJZMhSQ2mZ<7+tHT}D(hZN-Yi;Qmf3R#4cx)KkYx_s zDej;h#2y2P{d5PhT|mg{+)l`<3WTgMJ0Yv#JTFU8nL z0sAf=m~6RNP*%+6Knk02fwiYP+p_0TZ?glL4Ogut^9l}ov4+h>(?Y?j?TJ&_tz(g} 
zofy`65W|A)#IVHmyc$DE$W04kSXMhRtZ|(qm4?bNW-jFb5#H(5p#{Y9qS;h!M+1)k zJDU4?IRIC$u+W^+2h5`_VzuoRrV}d~X;xXMFtLmHm9M~{@5eC7>8E-$!d#sgE(l*K z5fF>@DGM$m>R@1rCrJbjL~v`$-=fdD zJzJ9k!dw52!d=BLmxlHUdxl%l$1G{?85tb#7)Y`uYw748;f>K8t=3M`N*(f{vFnJQ z|32W8y6?y}z0X-ZW$*L^=k+4S`Wkyfr)&Z&mw_1%lZVFK*4k~}A(FAR27T{NjftRy78B6f2RekdC+g}qwp64Nw2 zAG>9CyXnIzdkqyA*bxCzmW5~p}E+H*$AR^GAeWSc6_;+z)>gfp5CrQ zA>1E-0D3+jvscH@d?lF9u!t;>Uy!%$bM&0|zYG>=6Q7N^C$1Jy zuoqkU9_36Flw)+rwJAGuKb*9W=?0unh=%(J?<4sgbtm4OW#4 z1@Ni&7uVZ4iZfl7pJ=Z2R2}ti3d8~H(dz?W0t;`&YA%lQ5cM|hAb>zZyDkogrXfPU;a zAe$sCi=lezus?r%Jbx;KvgJ^__^xhD4%AZ1%B=~r`+d@{D)?=DVuY_dZKQItf#u$D zQCVfG$&=Yx3163Im<*v`ujw-8^VErN0=z=$NE+p;hd~AR!5!Ft31DB=kLFRi;!G7;i9vGPr4}?5%fI8UN4IPOe}|~?yd0q zSy0?qO4tAdBl_+uI5f>^(UL;5ZRpodv^yxT1{PH@J1GB>p?nAp0kMx>Q25|{fg%*} zyQ8BrZ@d#XM)m4d$?qKV0B)T#{*COdd4104FfM~s*dQr5WWvG0epW? zLBe+kLS1+89YCo3&n&b+kN`E-1FaSOKz%!4r(5mV>1Du9?;+Xg*EfP>GDH{2e!#h{ zfaVai>_EQAr3Mx~YF8TmTJ-q%>q3W?x4>=;6^Er5_8=ptv0gl7sg0=Akn-quaacccVd=X3IOn>6)ZvW@aZDh5fUhm5 z`JDqY+mk--?pNQl^#UCWs}Hp!+g+}=_@7?2e|xidv!q*Pu17i9){E(gr&uWx=@X>netd%K<2*Cm3~w%u3amQSU`+xMl|YZA@S9@&tyaVs`?qoQ1C%|beT z!SiIN!9%}_y9X}Zli$|vuaTC~fA)yUnumX);iGs8J6`|&v9!|tC%&2pO2j`Ai+I0H z=3bV10ou6sW9QY6Eo`K1TF}-lX$dLp&wXvA{asMu8L$cLXT@JPyZzYX1vZKy|JVL5 ztc2uAlwMI8*`ECHN!X-%3QQ>6Oz~*s)plyucJk5=osK{e6>aO3sA5C{!OAkc5rZoZ zY&&Q|qjjT+~ zd4knxBLPDK{2!ya2KK3)PpQ^1HRMSUPYMnA_}Qg6>#M9ow~fua(>GG1CX%Om*sMO? 
zxIECIe%4f=X(rT0rgBuqq^`aW3(x{Eup=e)+H-sQ#9*gAm+3`0>RVz_0H#soBbD*yT26)^Dk-khhyCYKG`)- zzHWV=ur(4#r5*p!4zc*I(5YAK++#x-H7jdfw*vc^le(*VdtW_jsAXVYc2H9ha=-Fs z#-gEX*M$^^ecs2`sd;X`J*>@WJTP=ecj1BDK+h`g={^tM-+7OL_5l8C&z~grf3qH2 zA+87H0cF@DA&xUbm53XoB(Zv$=noXIAd^RCsA4Wud&izejJTwV)yHcb%7iN>@Nu`Q zb2L*=JKD$E-W{VPmGMG;2)Kk(6=T{(Ilue)?$hV`?sDqK1Pdqk(tF%6ZoJp=u5_~3OHxJj z>$^LCU)Jr6Q_qFH&|Wt2wg@|#b~oMYQ=+o(*m2o|9Xzl+jI5||`K87gr=WF(c7xBG z&sV>37czBzH_QA32cg}&|KgzZe;8A>xMwU+;dE5nMoRQB*x-r7Nf9gCam<*@=cX7x zg;MXT@KNWbzIxc4&4l(CepoMFv3AT&^!YF~lZ!yWV2@E|%Bzd^SF}zF4d?Cg*=Dpo zoN&kI1wLza7k-0J?hjN#n~MKMr6lHmOCAWwHI5 z42^B%r3`^({F%X#wt;DIN9HE@KGov%C=ONbO!6!3(6rT&KM4_;bd=AS3lp%YZ)%+^B0~wX=!h&-A-SbRb!(_KWr#Hcwb(D7bdgv zhra$npG*ChuEy5veu|<~y_A2B%Kz}nQT^olk-FW?{7zK@Z{^Ln>C=Urunj6S*$g7# zXI+DqQc%0hH1vpBb2lyDJei@2Th2FFTwqGho-24CK1)uae8A=pr2RpyOQ8SvC}44A zJK#y1s2;AEawSSR8nIuKtg&Mvn0_+Q6j&5Dq-Y-=jt~wJ-c2@Ewma?jrm=s}Y7+lp zj$?7SQz$a!v)X*rc=*t8s{x`Zp}qArRR^hU7gP3-lM^l+iuP$YcI=srP{u##RO}yA z7+UH-c090QuL`h~xF;zM{?M;K=p!_kR{vYFLE%@gg`XzEs{#A*z`kHS#quW_tcEqM*4QF`OgvIaupRI6^Nb z?Z6dLf>UMS5N>x#$yLK!#G;s+sQ8sK2R0sy8)puBCMtw99zhO|S97=C>a7{M5%VM< zj~q?^hu-`_mtn)dQX=q^pu6gBq-R`>sdnsK#43cF}8+o-y zgc+63d90OP9o!@N;UXcdk^cGpJ@W?Pns*)>Bi<~eV?bcXma5g@R{e*(53bLH6&h~c z-qo^JQCv9D${o*R!p4`Y`V`aDIDH_c>r;`<8UGafr}5sQnZmuIEhf|koNuM(qgQi% zL`!HQYtvJrVcf=1`}~!@hCt*{%Jz+cRBX}QEk>0Jr*3L4a8kiI11rO&&B6-2X6 z4|1vvD+TcnYCJ>W`JIlR5J7%xP2n%cGq0$;~2|kG>{L9*ey#A-lvE zZhW75!>~u35SQ3Y`(RKy{6@OIv3I3P((X=l3u2KmyMcrL*t*=_x_Ew8?&Q`{#@)FA z7Cbj&GjSsnlQ$F`*=QNxmo}I`3Sv99|3cy_`CXmqppV;yU&^2K_3?7g2R8phs zG5=-yit1~*Q7X~ZbPF$q+T0$+rwT`W*zTiiqovs$qv&z^eNOCkxX7u;6IbrrHH>1> z!K2g>{=$8ZCLa`y;97V+iCnKQzDqp)u4(igNxel3HU&)IV}*vNo5s)_wv~N^*UWQg z3+2<5o)?%k^0V&Od1J|E%czi!5i4_-mxNELo1|JMI2j+n?jeeYX(GbzW#n~HDeZ}U zs2+!N+q-cK8SfE(GVTs~&Rx{eqWieig`PQ+g0CNvdCt0u3G7D}a4jw4DEI5z7kpLJ ze7fnxwCrAzH0rHA)wS={_@&P0U+ay|HD&ZUDdOI|AAKA-z7bnEHMRDg(&F5i$UpG# z4@w*@`5$oTc!CzA3Q$C=nf3ecgkqmg$<^Jfwp*xsphD{z^!)?eMNY2A^^ll{^>i#z 
zID>2aX(=54{)N=`^QxnbXr;F|L`_n?S_V4yxYuLQ6cy^v2jp%%Woj{_+hd)|`{XJ7 z;Mw>!<3-b?rS3}ExAE0wHWu#GJ#a6(f4NzE|0k9RQNe~|GNhR?o}x(&3ntf#yAqZj{OAh5X4gxKHVjshvTY*pWS~(#IoYlA%*KD{#2<_Z zssCVSOB{d15{P1$l@HT+8n5}7sYO3@^UPeRc#7_ImNwc8AIO8p;*f}MH;^2m<-3@U zY-oD)tM0aCJH*;hDDwGUCYz%>KkI<0&dpM#_N52&&LKS23U7&Aus;y^2hEQb{}1|A zKK>bF1By;%VGXvenAb0qd-=re^AVr63;DEbcKK4^W5arSIPYC+2tSuG-t>~+=)$+X zO^W=It?B0&Z{@w@*0|zpCB<&WQ^rYrUp&3eQj=)o*VC+A&2JZYsld>FINz1Ekz0a# zMrbDU=40kZ*YDc)izmJnFHLl>+Ff_4+pur@_Wj50Yt*$D#kT&?t3MbG7_`KH&l)JQR!cK5;{^bE~d1_JuLeGqdorWJLN`gQPo%3B#kl>0+7H!+jm+#ivrr zMBM|cDjp*rBtDI@i41vWpH@YFO^#fp@5LQ`mHj+DuWuO&77bG!jjnWJdW$a)O07Ul zMZv8EIYg8?@2auxbLt>dM^OTQeH&MD|WUQ|36xP>|C6|6^bS_>hr8%{} zMSfkwDCKwlpnsoRq$7(+WlML$kKA#6eN|&&Rl<2oV^2d%CwGW>Ztfl`c9w3yFh=TF z{>Wn_B*44^X9HCU?szwQ7Y}DQIAAsDFyUV|J!IfKh(r= zOAj}D($PdQ6biDBz~5gv=!ha9P7j=z1g8>7$1Pzex0RbE{sO_nfnehSmj?a+@!QSb)(#Ya*pwwW zJ_*Fq9*zcKHl!1{fTlg*NKfGCfC6tA(g#G{z!6F0DSK-VJJ2skq5gW>lEmttZpw~J z^{>Go4bK08^MFtXOIvr)_y05oO73{*?5H#bKo~UK&<{}z7SOC>>7q#>bqfVUn*7Xy zlc^q-4)%B@CtC*sARKAvL2xvNGRG~wNj-w2vErCtkFEYR7|y_PDAfNK3HJZ?Jy>9E zivuYW2ghHfuu^aw76$^OD7d5)3XYMKfMcamq$C08gziK2u|Nt)SF9vd|Nkz*e%{A| zx=@1N@iR#P>mQAR7$7AM5_plc6kHMr77j?M1ZZ3mN)i}2Mp^;}5*#OigG)$&mjDk{ zQXJHk0tBEy9=byJu^@pn!II#4=$+C)3*n@8%AhN$GgwI&bcKk3It3*h8cTXs0(5-m z9iT2~mz2kf14c-Zl7yr*=_(Bj6C5bVp)sI?JMD^tb|rupkCld=6NgKHs}x`sbR{JW zR1cgp{=qb;ebS&o3Q-8e$A_|3qk?#F?fi#Fz{!EY{?8pEl2meeYUIV#+Wa5`Zk_Sj8ekPKv z{E!Jq9)3*_U8LXO9guANOi<5pCzwf8i?$Z%Hxfzsq(o7Ys6} z2-^W_I0kZrK@}2ygJIwK0YN%u2W9~<11TUCI|1?D$w5j0LuwlKQ%e9mBvsl`7NocV zc`qgE ze|O+l_g5WA9i-ZSRS-V`6;gmo=wJ2VUtc0Xswjbnka)mI2#S$PnEzunpwxa;`vtUr zaZvuI>lYK_@4{aPPzw?Y|| z|I~xOhZafSO@Qy{QaG3q$RCPIT0#=~zJY_Yn<2>n0AE|sa{4rn6W`9p}jxsgYY zMU9TLt5KbLJI)SIiX9y1&|tCFdDcw(%Qa)bsm zzT~Z_%F(c*w@Cg_q6VYoogs0}{r0zpaQtD!tO41K0XcQr(~4@5NqtHQeJW}aL5TRc zs5hFLW55(fqm95ig=T${%e@G-)9iHQ5<7+3Pl3K-&|?Ol0P0TcOqK0#*;TSM*3u|)T4`%XX0>Y<0LA5#4Y2n z_&5xKh%Uc`vL_-h5XDL_i@Fj;iv5K>iAOFH&4daA%mn?20uO=?KM3N#AABf+$agp7 
z;O#KPtuXk_aNgVqo~$U)Ga_FUQR9fOlxeK0Y^bVCh$^{}f@TzNT8!oa2fj$+5pSZp znbX;ClZcf`_|hbg=3d*W4;&L8IC#^e-+y2?6S_n^7!%k3p5x8?125lmYM8m5Ykdco zJRRHg4#9gZT9f@eUvymch?!v3h?!9Nh_L29y9dLld&3w_UKdSnXE8_OjX_DCjOd&} z>4R}`Y5hm9_A3&5)na#N2k(R;^26YH;k-8@crv39S7J3-35SD-BCbSDxT_kqsi+H4eU~|JPNev` zI61u-Rp}5R7%;h&?LFM}2yQCQjxGQgfj#)=5G63SNuT=wUmtOFC!vd;8~D-)p5!e6 z>>*7)VBV5)o{qrXfTrgkUJl{Yk7zsy^8LVozdj%`-)`H-OhFbt7OU50cf?dpGn58V zN?0f{@RB_kl}N+?qU1ouF%`l-%#l#cwX)J9gGOatU-nlkEM;y;Ei7*yF`5oIccPWu zS0(7Plr@7N9N$k13*Cg{1t~Zhwi3`wVH9Ipt?cVc%`VK=4E#BYg47%himmK4O3k0e z_tFs%cquAOxSJkaZWl+xVgi~A#-Tzj$jlfLYsqfSfK+V$d~h$FhElU0JtjOAf%m3{ zg|>lOv>XjCt?cT`L3-4fa8@{;otnDdu9aOkI5ziyH3L3JksZA5ZUXvsXso5&Ub@>V z&3c@eaQXyvCa5@&fPM^aZ-8IBsOwc**}Fnw$1V#p9}SGn6|-hohU33c!9r;g(3fdA z8uqrbi-l8Ig14qBHoFK}Ggv4Fxp07saVH3*FXWQHAAIRP!Sy_oIe3Q47%?o$b1^q-(}740D=F;4GZ;3Ko^9@j`0XG z52`eO7O-YmRcX{n1C1cF3}7gkHNy}bZ$e33 zuhYuT1A5B0myQqcf`OyK5O6*a>_ly4KcLiHwBMTHwo;JZ9!z*cZgA11dJ8@H963jW zPy)Imh+=GaEBgpwH+Y*$uHrO!_$DBemP18UkeL_}I|kSq4XC0uV)hbp2#&R6vu5B` z4nk34!fU{I?1F`YQ7#CK9YX*bLMU=Ytr>QM?o)C!IP1Y*gXd4Rvg1^mdtk880MJ{& zh3nuk&}r8MbXj1mCC6U6P6XZr#?hdbfHq^KuFnF)0*K>IK#PP^jCr-PSEw|jI56R1 zdhlUTi=vg?DJXVKT9BD1Ja$Z@m3>krsAn%G9E-pUg4kLqO9dAS$60W3sL6L@?&)y>ypA52dXc*g$0%wK5=?Cn(ku48$lH z?A_K30ziIfG2!=f6{Wys7Xn`e!-N;+D0%~-x(LTlQ&1~=wzB7gT0G!(j^Z714iyf- zH^6%!u%|$WcVogOLHF5Vq1g#&2G9W$L1y#d*fIK6_O0;PTrd?hfIu-=GjuC97qP)Y z%@fc%K$wqMGi(DAP}U3^%FR8y_k?eg$@mjzeveO(W;UeEr6thNT`2qOqQ#D%yYn3; zq_({^}D$jLxQk^wI?2Yyy(}*T&!*2R*;MDWc~9Mpem^WduJzA z=qpcxsxlZ9Mgk>?kpe+mDGchg&@Yygw;RC*?41+Gz<@LI;~((lB!C%c1OKZGvgx3I zaHk(-XelYMTK}&y-~#?$hJ(T+|9loF3EuQu8Dts%yB_e&|5*lp;efICk9sH+7BYDM zQ3km_|E2|n!k~a5`;U4kX=$l{m!WY|kfHpKdT0p@WcU874E1|GaS5>Px@}|7rsR?S%SwnKp}j8%=3(GS17~Ll m5FmU2hP{n5oCF9YFUj4*(#?Z34bjpl90tbEuXaKm_J0BU;$kxZ From e60475bde4dc0561e97ca3c9744ea71300c2972f Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 1 Aug 2025 20:39:00 +0000 Subject: [PATCH 103/244] update decoding notebook --- doc/user_guide/07_decoding.md | 245 
+++++++++++++++++++++++++++++++--- pynapple/process/decoding.py | 137 ++++++++++--------- tests/test_decoding.py | 10 +- 3 files changed, 302 insertions(+), 90 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 490d7c397..2d836994c 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -24,16 +24,19 @@ custom_params = {"axes.spines.right": False, "axes.spines.top": False} sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) ``` -Input to the decoding functions always includes: - - `tuning_curves`, computed using [`nap.compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). +Pynapple supports n-dimensional decoding from any neural modality. +For spike data, you can use [`decode_bayes`](pynapple.process.decoding.decode_bayes), which implements Bayesian decoding using a Poisson distribution. +For any other type of data (and also for spike data), you can use [`decode_template`](pynapple.process.decoding.decode_template), which implements a template matching algorithm. + +Input to both decoding functions always includes: + - `tuning_curves`, computed using [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). - `data`, neural activity as a `TsGroup` (spikes) or `TsdFrame` (smoothed counts). - `epochs`, to restrict decoding to certain intervals. - `bin_size`, the size of the bins in which to count the data (spikes only). - `time_units`, the units of `bin_size`, defaulting to seconds. ## Bayesian decoding -Pynapple supports n-dimensional decoding from spikes in the form of Bayesian decoding with a Poisson assumption. -In addition to the default arguments, users can set `uniform_prior=False` to use the occupancy as a prior over the feature distribution. +When using Bayesian decoding, users can additionally set `uniform_prior=False` to use the occupancy as a prior over the feature distribution. 
By default `uniform_prior=True`, and a uniform prior is used. :::{important} @@ -77,7 +80,11 @@ First, we compute the tuning curves: ```{code-cell} ipython3 tuning_curves_1d = nap.compute_tuning_curves( - tsgroup, feature, bins=61, range=(0, 2 * np.pi), feature_names=["feature"] + tsgroup, + feature, + bins=61, + range=(0, 2 * np.pi), + feature_names=["Circular feature"] ) ``` @@ -85,7 +92,7 @@ tuning_curves_1d = nap.compute_tuning_curves( :tags: [hide-input] tuning_curves_1d.name = "Firing rate" tuning_curves_1d.attrs["unit"] = "Hz" -tuning_curves_1d.plot.line(x="feature", add_legend=False) +tuning_curves_1d.plot.line(x="Circular feature", add_legend=False) plt.show() ``` @@ -106,14 +113,15 @@ decoded, proba_feature = nap.decode_bayes( ```{code-cell} ipython3 :tags: [hide-input] fig, (ax1, ax2) = plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) +feature=feature.restrict(epochs) ax1.plot( - np.linspace(0, len(decoded), len(feature.restrict(epochs))), - feature.restrict(epochs), + feature.times(), + feature.values, label="True", ) ax1.scatter( - np.linspace(0, len(decoded), len(decoded)), - decoded, + decoded.times(), + decoded.values, label="Decoded", c="orange", ) @@ -121,17 +129,18 @@ ax1.legend( frameon=False, bbox_to_anchor=(1.0, 1.0), ) -ax1.set_xlim(epochs[0, 0], epochs[0, 1]) -im = ax2.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") +ax1.set_ylabel("Circular\nfeature") +ax1.set_yticks([0, 2*np.pi], ["0", "2π"]) +im = ax2.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis", extent=(0, 10.0, 0, 2*np.pi)) cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) fig.colorbar(im, cax=cbar_ax, label="Probability") -ax2.set_xticks([0, len(decoded)], epochs.values[0]) -ax2.set_yticks([]) ax2.set_xlabel("Time (s)", labelpad=-20) +ax2.set_ylabel("Circular\nfeature") +ax2.set_yticks([0, 2*np.pi], ["0", "2π"]) plt.show() ``` -### 2-dimensional Bayesian decoding +### N-dimensional Bayesian decoding ```{code-cell} 
ipython3 :tags: [hide-cell] @@ -161,7 +170,8 @@ for i, mu in enumerate(bin_centers): ts_group = nap.TsGroup(ts_group) ``` -Decoding also works with multiple dimensions. +Decoding also works with multiple dimensions (here we show a 2D example). + First, we compute the tuning curves: ```{code-cell} ipython3 @@ -230,3 +240,206 @@ ax3.scatter( ax3.set_title("Combined") plt.show() ``` + +## Template matching +If you do not have spike data, or if you do not want to use the Poisson assumption, Pynapple also supports decoding using template matching, which makes no assumption on the modality of your data. +Instead of computing a probability distribution, `compute_template` computes a distance matrix between the samples and the tuning curves (smaller is better). +In addition to the default arguments, users can set `metric` to choose the used distance metric. By default `metric="correlation"`. + +### 1-dimensional template matching + +```{code-cell} ipython3 +:tags: [hide-cell] +from scipy.ndimage import gaussian_filter1d + +# Fake Tuning curves +N = 6 # Number of neurons +bins = np.linspace(0, 2*np.pi, 61) +x = np.linspace(-np.pi, np.pi, len(bins)-1) +tmp = np.roll(np.exp(-(1.5*x)**2), (len(bins)-1)//2) +tc = np.array([np.roll(tmp, i*(len(bins)-1)//N) for i in range(N)]).T + +tc_1d = pd.DataFrame(index=bins[0:-1], data=tc) + +# Feature +T = 10000 +dt = 0.01 +timestep = np.arange(0, T)*dt +feature = nap.Tsd( + t=timestep, + d=gaussian_filter1d(np.cumsum(np.random.randn(T)*0.5), 20)%(2*np.pi) + ) +index = np.digitize(feature, bins)-1 + +# Spiking activity + +count = np.random.poisson(tc[index])>0 +tsgroup = nap.TsGroup({i:nap.Ts(timestep[count[:,i]]) for i in range(N)}) +epochs = nap.IntervalSet(0, 10) +``` + +First, we compute the tuning curves (here we'll use spikes as neural data): + +```{code-cell} ipython3 +tuning_curves_1d = nap.compute_tuning_curves( + tsgroup, + feature, + bins=61, + range=(0, 2 * np.pi), + feature_names=["Circular feature"] +) +``` + +```{code-cell} 
ipython3 +:tags: [hide-input] +tuning_curves_1d.name = "Firing rate" +tuning_curves_1d.attrs["unit"] = "Hz" +tuning_curves_1d.plot.line(x="Circular feature", add_legend=False) +plt.show() +``` + +We can then use `nap.decode_template` for template matching: + +```{code-cell} ipython3 +decoded, dist = nap.decode_template( + tuning_curves=tuning_curves_1d, + data=tsgroup, + epochs=epochs, + bin_size=0.05, + metric="correlation" +) +``` + +`decoded` is a `Tsd` object containing the decoded feature value. +`dist` is a `TsdFrame` containing the distance matrix of every time bin with respect to the tuning curves. + +```{code-cell} ipython3 +:tags: [hide-input] +fig, (ax1, ax2) = plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) +feature=feature.restrict(epochs) +ax1.plot( + feature.times(), + feature.values, + label="True", +) +ax1.scatter( + decoded.times(), + decoded.values, + label="Decoded", + c="orange", +) +ax1.legend( + frameon=False, + bbox_to_anchor=(1.0, 1.0), +) +ax1.set_ylabel("Circular\nfeature") +ax1.set_yticks([0, 2*np.pi], ["0", "2π"]) +im = ax2.imshow(dist.values.T, aspect="auto", origin="lower", cmap="inferno_r", extent=(0, 10.0, 0, 2*np.pi)) +cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) +fig.colorbar(im, cax=cbar_ax, label="Distance") +ax2.set_xlabel("Time (s)", labelpad=-20) +ax2.set_ylabel("Circular\nfeature") +ax2.set_yticks([0, 2*np.pi], ["0", "2π"]) +plt.show() +``` + +### N-dimensional template matching + +```{code-cell} ipython3 +:tags: [hide-cell] +dt = 0.01 +T = 10 +epoch = nap.IntervalSet(start=0, end=T, time_units="s") +features = np.vstack((np.cos(np.arange(0, T, dt)), np.sin(np.arange(0, T, dt)))).T +features = nap.TsdFrame( + t=np.arange(0, T, dt), + d=features, + time_units="s", + time_support=epoch, + columns=["a", "b"], +) + + +# Calcium activity +ft = features.values +alpha = np.arctan2(ft[:, 1], ft[:, 0]) +bin_centers = np.linspace(-np.pi, np.pi, 12) +kappa = 4.0 +units=[] +for i, mu in enumerate(bin_centers): + 
units.append(np.exp(kappa * np.cos(alpha - mu))) # wrapped Gaussian +units = np.stack(units, axis=1) +tsdframe = nap.TsdFrame(t=features.times(), d=units) +``` + +Template matching also works with multiple dimensions. + +First, we compute the tuning curves (now let's simulate calcium imaging in a `TsdFrame`): + +```{code-cell} ipython3 +tuning_curves_2d = nap.compute_tuning_curves( + data=tsdframe, + features=features, # containing 2 features + bins=10, + epochs=epochs, + range=[(-1.0, 1.0), (-1.0, 1.0)], # range can be specified for each feature +) +``` + +```{code-cell} ipython3 +:tags: [hide-input] +tuning_curves_2d.name = "ΔF/F" +tuning_curves_2d.attrs["unit"] = "a.u." +tuning_curves_2d.plot(row="unit", col_wrap=6) +plt.show() +``` + +and then, `nap.decode_template` again performs template matching: + +```{code-cell} ipython3 +decoded, dist = nap.decode_template( + tuning_curves=tuning_curves_2d, + data=tsdframe, + epochs=epochs, + bin_size=0.2, + metric="correlation" +) +``` + +```{code-cell} ipython3 +:tags: [hide-input] +fig, (ax1, ax2, ax3) = plt.subplots(figsize=(8, 3), nrows=1, ncols=3, sharey=True) +ax1.plot(features["a"].get(0, 20), label="True") +ax1.scatter( + decoded["a"].get(0, 20).times(), + decoded["a"].get(0, 20), + label="Decoded", + c="orange", +) +ax1.set_title("Feature a") +ax1.set_xlabel("Time (s)") + +ax2.plot(features["b"].get(0, 20), label="True") +ax2.scatter( + decoded["b"].get(0, 20).times(), + decoded["b"].get(0, 20), + label="Decoded", + c="orange", +) +ax2.set_xlabel("Time (s)") +ax2.set_title("Feature b") + +ax3.plot( + features["a"].get(0, 20), + features["b"].get(0, 20), + label="True", +) +ax3.scatter( + decoded["a"].get(0, 20), + decoded["b"].get(0, 20), + label="Decoded", + c="orange", +) +ax3.set_title("Combined") +plt.show() +``` diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index de497519f..886a14011 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -60,10 +60,17 @@ 
def wrapper(*args, **kwargs): return wrapper -def _format_decoding_outputs(p, tuning_curves, data, epochs): - idxmax = np.argmax(p, 1) +def _format_decoding_outputs(p, tuning_curves, data, epochs, greater_is_better): + # Get the index of the decoded class + filler = -np.inf if greater_is_better else np.inf + filled = np.where(np.isnan(p), filler, p) + idx = getattr(np, "argmax" if greater_is_better else "argmin")(filled, axis=1) - # Fromat probability distribution + # Replace with -1 where all values were NaN + all_nan = np.isnan(p).all(axis=1) + idx[all_nan] = -1 + + # Format probability distribution p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) if p.ndim > 2: p = nap.TsdTensor( @@ -79,24 +86,32 @@ def _format_decoding_outputs(p, tuning_curves, data, epochs): columns=tuning_curves.coords[tuning_curves.dims[1]].values, ) - # Format decoded - idxmax = np.unravel_index(idxmax, tuning_curves.shape[1:]) + # Format decoded index + shape = tuning_curves.shape[1:] + valid = idx != -1 + if tuning_curves.ndim == 2: + decoded_values = np.full(len(idx), np.nan) + decoded_values[valid] = tuning_curves.coords[tuning_curves.dims[1]].values[ + idx[valid] + ] decoded = nap.Tsd( t=data.index, - d=tuning_curves.coords[tuning_curves.dims[1]][idxmax[0]].values, + d=decoded_values, time_support=epochs, ) else: + # unravel valid indices only + unraveled = [np.full(len(idx), np.nan) for _ in shape] + unraveled_indices = np.unravel_index(idx[valid], shape) + for i in range(len(shape)): + unraveled[i][valid] = tuning_curves.coords[ + tuning_curves.dims[1 + i] + ].values[unraveled_indices[i]] + decoded = nap.TsdFrame( t=data.index, - d=np.stack( - [ - tuning_curves.coords[dim][idxmax[i]] - for i, dim in enumerate(tuning_curves.dims[1:]) - ], - axis=1, - ), + d=np.stack(unraveled, axis=1), time_support=epochs, columns=tuning_curves.dims[1:], ) @@ -154,7 +169,7 @@ def decode_bayes( Tuning curves as outputed by `compute_tuning_curves` (one for each unit). 
data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. - You may also pass a TsdFrame with smoothed counts (recommended). + You may also pass a TsdFrame with smoothed counts. epochs : IntervalSet The epochs on which decoding is computed bin_size : float @@ -169,10 +184,9 @@ def decode_bayes( Returns ------- Tsd - The decoded feature + The decoded feature. TsdFrame, TsdTensor - The probability distribution of the decoded trajectory for each time bin - + The probability distribution of the decoded trajectory for each time bin. Examples -------- @@ -335,25 +349,23 @@ def decode_bayes( p = p1 * p2 * p3 p = p / p.sum(1)[:, np.newaxis] - return _format_decoding_outputs(p, tuning_curves, data, epochs) + return _format_decoding_outputs( + p, tuning_curves, data, epochs, greater_is_better=True + ) @_format_decoding_inputs def decode_template( - tuning_curves, data, epochs, bin_size, metric="euclidean", time_units="s" + tuning_curves, data, epochs, bin_size, metric="correlation", time_units="s" ): """ Performs template matching decoding over n-dimensional features. - The algorithm is as follows: + The algorithm is based on the following steps: 1. For every time bin, we compute the distance between the neural activity and the tuning curves using the chosen distance metric. - 2. We rescale the distance to [0,1]. - 3. We transform the distance to similarity = 1 - distance. - 4. We compute an estimated probability distribution by normalizing every time bin, - i.e. dividing by the sum over all feature bins. - 5. For every time bin, the decoded feature bin is the one that corresponds to the maximum estimated probability. + 5. For every time bin, the decoded feature bin is the one that lies closest to the tuning curves. See:\n Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. @@ -368,7 +380,7 @@ def decode_template( Tuning curves as outputed by `compute_tuning_curves` (one for each unit). 
data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. - You may also pass a TsdFrame with smoothed counts (recommended). + You may also pass a TsdFrame with smoothed counts. epochs : IntervalSet The epochs on which decoding is computed bin_size : float @@ -390,8 +402,7 @@ def decode_template( Tsd The decoded feature TsdFrame, TsdTensor - The probability distribution of the decoded trajectory for each time bin - + The distance matrix between the neural activity and the tuning curves for each time bin. Examples -------- @@ -429,21 +440,19 @@ def decode_template( >>> p Time (s) 0.0 1.0 ---------- ----- ----- - 0.5 1.0 0.0 - 1.5 1.0 0.0 - 2.5 1.0 0.0 - 3.5 1.0 0.0 - 4.5 1.0 0.0 - 5.5 1.0 0.0 - 6.5 1.0 0.0 + 0.5 0.0 2.0 + 1.5 0.0 2.0 + 2.5 0.0 2.0 + 3.5 0.0 2.0 + 4.5 0.0 2.0 + 5.5 0.0 2.0 ... ... ... - 93.5 0.0 1.0 - 94.5 0.0 1.0 - 95.5 0.0 1.0 - 96.5 0.0 1.0 - 97.5 0.0 1.0 - 98.5 0.0 1.0 - 99.5 0.0 1.0 + 94.5 2.0 0.0 + 95.5 2.0 0.0 + 96.5 2.0 0.0 + 97.5 2.0 0.0 + 98.5 2.0 0.0 + 99.5 2.0 0.0 dtype: float64, shape: (100, 2) p is a `TsdFrame` object containing the probability distribution for each time bin. @@ -488,22 +497,20 @@ def decode_template( >>> p Time (s) - ---------- -------------- - 0.5 [[1., 0.] ...] - 1.5 [[0., 1.] ...] - 2.5 [[1., 0.] ...] - 3.5 [[0., 1.] ...] - 4.5 [[1., 0.] ...] - 5.5 [[0., 1.] ...] - 6.5 [[1., 0.] ...] + ---------- -------------------------- + 0.5 [[0. , 1.333333] ...] + 1.5 [[1.333333, 0. ] ...] + 2.5 [[0. , 1.333333] ...] + 3.5 [[1.333333, 0. ] ...] + 4.5 [[0. , 1.333333] ...] + 5.5 [[1.333333, 0. ] ...] ... - 93.5 [[0., 0.] ...] - 94.5 [[0., 0.] ...] - 95.5 [[0., 0.] ...] - 96.5 [[0., 0.] ...] - 97.5 [[0., 0.] ...] - 98.5 [[0., 0.] ...] - 99.5 [[0., 0.] ...] + 94.5 [[1.333333, 1.333333] ...] + 95.5 [[1.333333, 1.333333] ...] + 96.5 [[1.333333, 1.333333] ...] + 97.5 [[1.333333, 1.333333] ...] + 98.5 [[1.333333, 1.333333] ...] + 99.5 [[1.333333, 1.333333] ...] 
dtype: float64, shape: (100, 2, 2) and p is a `TsdTensor` object containing the probability distribution for each time bin. @@ -537,15 +544,13 @@ def decode_template( tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = data.values - # compute distance matrix - dist = cdist(ct, tc, metric=metric) - # rescale to [0, 1] - dist = (dist - np.min(dist, keepdims=True)) / np.ptp(dist, keepdims=True) - # transform to similarity - sim = 1 - dist - # normalize - p = sim / sim.sum(axis=1, keepdims=True) - return _format_decoding_outputs(p, tuning_curves, data, epochs) + return _format_decoding_outputs( + cdist(ct, tc, metric=metric), + tuning_curves, + data, + epochs, + greater_is_better=False, + ) # ------------------------------------------------------------------------------------- diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 4ae4459c3..08e83e1e1 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -236,7 +236,7 @@ def test_decode_template(n_features, binned, metric): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( n_features, binned=binned ).values() - decoded, proba = nap.decode_template( + decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=data, epochs=epochs, @@ -249,15 +249,9 @@ def test_decode_template(n_features, binned, metric): np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) assert isinstance( - proba, + dist, nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, ) - expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) - target_indices = [np.arange(len(features))] + [ - features[:, d] for d in range(features.shape[1]) - ] - expected_proba[tuple(target_indices)] = 1.0 - np.testing.assert_array_almost_equal(proba.values, expected_proba) # ------------------------------------------------------------------------------------ From 720e5bffe8b6f5f892bf56d6361993490149bf62 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: 
Fri, 1 Aug 2025 21:54:10 +0000 Subject: [PATCH 104/244] start on adding decoding to calcium imaging notebook --- doc/examples/tutorial_calcium_imaging.md | 48 ++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index be6c05bb1..aa215b671 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -23,6 +23,7 @@ The NWB file for the example is hosted on [OSF](https://osf.io/sbnaw). We show b ```{code-cell} ipython3 :tags: [hide-output] +import numpy as np import pynapple as nap import matplotlib.pyplot as plt import seaborn as sns @@ -152,6 +153,53 @@ ax2.set_title("Second half") plt.show() ``` +*** +Calcium decoding +--------------------- + +Given some tuning curves, we can also try to decode head direction from the population. +For calcium imaging data, Pynapple has `decode_template`, which implements a template matching algorithm. + +```{code-cell} ipython3 +epochs = nap.IntervalSet([10, 100]) +decoded, dist = nap.decode_template( + tuning_curves=tuning_curves, + data=transients, + epochs=epochs, + bin_size=0.1, + metric="correlation" +) +``` + +```{code-cell} ipython3 +fig, (ax1, ax2) = plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) +angle=angle.restrict(epochs) +ax1.plot( + angle.times(), + angle.values, + label="True", +) +ax1.scatter( + decoded.times(), + decoded.values, + label="Decoded", + c="orange", +) +ax1.legend( + frameon=False, + bbox_to_anchor=(1.0, 1.0), +) +ax1.set_ylabel("Angle [rad]") +#ax1.set_yticks([0, 2*np.pi], ["0", "2π"]) +im = ax2.imshow(dist.values.T, aspect="auto", origin="lower", cmap="inferno_r", extent=(10.0, 100.0, 0.0, 2*np.pi)) +cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) +fig.colorbar(im, cax=cbar_ax, label="Distance") +ax2.set_xlabel("Time (s)", labelpad=-20) +ax2.set_ylabel("Angle [rad]") +#ax2.set_yticks([0, 2*np.pi], ["0", "2π"]) +plt.show() +``` + :::{card} 
Authors ^^^ From 7ee5e97ea13e42dd23314bce4c40a181b61a5d48 Mon Sep 17 00:00:00 2001 From: Jacob Date: Thu, 3 Jul 2025 17:57:03 -0400 Subject: [PATCH 105/244] Add detect_oscillatory_events --- doc/user_guide/12_filtering.md | 75 +++++++++++++++++++++++++++++- pynapple/process/filtering.py | 83 +++++++++++++++++++++++++++++++++- tests/test_filtering.py | 51 ++++++++++++++++++++- 3 files changed, 206 insertions(+), 3 deletions(-) diff --git a/doc/user_guide/12_filtering.md b/doc/user_guide/12_filtering.md index c187bfbc9..fb6a4e1ac 100644 --- a/doc/user_guide/12_filtering.md +++ b/doc/user_guide/12_filtering.md @@ -14,7 +14,7 @@ kernelspec: Filtering ========= -The filtering module holds the functions for frequency manipulation : +The filtering module holds the functions for frequency manipulation: - [`nap.apply_bandstop_filter`](pynapple.process.filtering.apply_bandstop_filter) - [`nap.apply_lowpass_filter`](pynapple.process.filtering.apply_lowpass_filter) @@ -377,3 +377,76 @@ plt.xlabel("Number of dimensions") plt.ylabel("Time (s)") plt.title("Low pass filtering benchmark") ``` + + +*** +Detecting Oscillatory Events +--------------------------- + +The filtering module also provides a method [`detect_oscillatory_events`](pynapple.process.filtering.detect_oscillatory_events) to automatically detect intervals containing oscillatory events (such as ripples or spindles) in a signal. 
+ +To demonstrate, let's create a synthetic signal where a fast oscillation (e.g., 40 Hz) occurs in a noisy signal: + +```{code-cell} ipython3 +# Parameters +fs = 1000 # Sampling frequency (Hz) +duration = 3 # seconds +t = np.linspace(0, duration, int(fs * duration)) + +# 40 Hz oscillation +osc = np.sin(2 * np.pi * 40 * t) +signal = np.zeros_like(t) + 0.2 * np.random.randn(len(t)) +mask = (t > 1) & (t < 1.5) +signal[mask] += osc[mask] + +# Create Tsd object +ts = nap.Tsd(t=t, d=signal) +``` + +```{code-cell} ipython3 +:tags: [hide-input] + +# Plot the signal +plt.figure(figsize=(15, 4)) +plt.plot(ts, label="Signal (40 Hz oscillation)") +plt.xlabel("Time (s)") +plt.ylabel("Amplitude") +plt.title("Signal with oscillatory bursts") +plt.legend() +plt.show() +``` + +Now, let's use [`detect_oscillatory_events`](pynapple.process.filtering.detect_oscillatory_events) to find the oscillation intervals. The function will return the detected intervals as an `IntervalSet` along with metadata containing peak times. 
+
+```{code-cell} ipython3
+# Define detection parameters
+freq_band = (35, 45) # Bandpass filter for 40 Hz
+thres_band = (1, 10) # Thresholds for normalized squared signal
+min_dur = 0.03 # Minimum event duration (s)
+max_dur = 1 # Maximum event duration (s)
+min_inter = 0.02 # Minimum interval between events (s)
+epoch = nap.IntervalSet(start=0, end=duration)
+
+# Detect oscillatory events
+osc_ep = nap.filtering.detect_oscillatory_events(
+    ts, epoch, freq_band, thres_band, (min_dur, max_dur), min_inter
+)
+
+print("Detected intervals:\n", osc_ep)
+```
+
+Let's visualize the detected intervals and peaks on the original signal:
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+plt.figure(figsize=(15, 4))
+plt.plot(ts, label="Signal")
+for s, e in osc_ep.values:
+    plt.axvspan(s, e, color="orange", alpha=0.3)
+plt.xlabel("Time (s)")
+plt.ylabel("Amplitude")
+plt.title("Detected oscillatory events")
+plt.legend()
+plt.show()
+```
diff --git a/pynapple/process/filtering.py b/pynapple/process/filtering.py
index 5fe05a4e8..384544330 100644
--- a/pynapple/process/filtering.py
+++ b/pynapple/process/filtering.py
@@ -7,7 +7,7 @@
 import numpy as np
 import pandas as pd
 
-from scipy.signal import butter, sosfiltfilt, sosfreqz
+from scipy.signal import butter, filtfilt, sosfiltfilt, sosfreqz
 
 from .. import core as nap
 
@@ -510,3 +510,84 @@ def get_filter_frequency_response(
         )
     else:
         raise ValueError("Unrecognized filter mode. Choose either 'butter' or 'sinc'")
+
+
+def detect_oscillatory_events(
+    lfp, epoch, freq_band, thresh_band, duration_band, min_inter_duration, wsize=51
+):
+    """
+    Simple helper for detecting oscillatory events (e.g. 
ripples, spindles)
+
+    Parameters
+    ----------
+    lfp : Tsd
+        Should be a single channel raw lfp
+    epoch : IntervalSet
+        The epoch for restricting the detection
+    freq_band : tuple
+        The (low, high) frequency to bandpass the signal
+    thresh_band : tuple
+        The (min, max) value for thresholding the normalized squared signal after filtering
+    duration_band : tuple
+        The (min, max) duration of an event in seconds
+    min_inter_duration : float
+        The minimum duration between two events otherwise they are merged (in seconds)
+    wsize : int, optional
+        The size of the window for digital filtering
+
+    Returns
+    -------
+    IntervalSet
+        The interval set of detected events with metadata containing
+        the power, amplitude, and peak_time
+    """
+    lfp = lfp.restrict(epoch)
+    frequency = lfp.rate
+    signal = apply_bandpass_filter(lfp, freq_band, frequency)
+    squared_signal = np.square(signal.values)
+    window = np.ones(wsize) / wsize
+
+    nSS = filtfilt(window, 1, squared_signal)
+    nSS = (nSS - np.mean(nSS))/np.std(nSS)
+    nSS = nap.Tsd(t = signal.index.values, d=nSS, time_support=epoch)
+
+    # Detect oscillation periods by thresholding normalized signal
+    nSS2 = nSS.threshold(thresh_band[0], method='above')
+    nSS3 = nSS2.threshold(thresh_band[1], method='below')
+
+    # Exclude oscillation where min_duration < length < max_duration
+    osc_ep = nSS3.time_support
+    osc_ep = osc_ep.drop_short_intervals(duration_band[0], time_units = 's')
+    osc_ep = osc_ep.drop_long_intervals(duration_band[1], time_units = 's')
+
+    # Merge if inter-oscillation period is too short
+    osc_ep = osc_ep.merge_close_intervals(min_inter_duration, time_units = 's')
+
+    # Compute power, amplitude, and peak_time for each interval
+    powers = []
+    amplitudes = []
+    peak_times = []
+
+    for s, e in osc_ep.values:
+        seg = signal.restrict(nap.IntervalSet(s, e))
+        if len(seg) == 0:
+            powers.append(np.nan)
+            amplitudes.append(np.nan)
+            peak_times.append(np.nan)
+            continue
+        power = np.mean(np.square(seg))
+        power_db = 10 * 
np.log10(power) + amplitude = np.max(np.abs(seg.values)) + peak_idx = np.argmax(np.abs(seg.values)) + peak_time = seg.index.values[peak_idx] + powers.append(power_db) + amplitudes.append(amplitude) + peak_times.append(peak_time) + + metadata = { + "power": powers, + "amplitude": amplitudes, + "peak_time": peak_times, + } + + return nap.IntervalSet(start=osc_ep.start, end=osc_ep.end, metadata=metadata) diff --git a/tests/test_filtering.py b/tests/test_filtering.py index 81e9a4aec..cbdcace64 100644 --- a/tests/test_filtering.py +++ b/tests/test_filtering.py @@ -481,7 +481,7 @@ def test_compare_sinc_kernel(): ikernel = nap.process.filtering._compute_spectral_inversion(kernel) ikernel2 = kernel2 * -1.0 - ikernel2[len(ikernel2) // 2] = 1.0 + ikernel2[len(kernel2) // 2] + ikernel2[len(kernel2) // 2] = 1.0 + ikernel2[len(kernel2) // 2] np.testing.assert_allclose(ikernel, ikernel2) @@ -523,3 +523,52 @@ def test_get_filter_frequency_response_error(): ValueError, match="Unrecognized filter mode. Choose either 'butter' or 'sinc'" ): nap.get_filter_frequency_response(250, 1000, "lowpass", "a", 4, 0.02) + + +@pytest.mark.parametrize( + "freq_band, thresh_band, start, end", + [ + ((10, 30), (1, 10), 0, 2), + ((40, 60), (1, 10), 3, 5), + ], +) +def test_detect_oscillatory_events(freq_band, thresh_band, start, end): + fs = 1000 + duration = 5 + min_dur = 0.1 + max_dur = 2 + min_inter = 0.02 + + t = np.linspace(0, duration, int(fs * duration), endpoint=False) + signal = np.zeros_like(t) + + # 25 Hz oscillation from 0-2s + freq_1 = 25 + mask1 = (t >= 0) & (t < 2) + signal[mask1] = np.sin(2 * np.pi * freq_1 * t[mask1]) + + # 50 Hz oscillation from 3-5s + freq_2 = 50 + mask2 = (t >= 3) & (t < 5) + signal[mask2] = np.sin(2 * np.pi * freq_2 * t[mask2]) + + ts = nap.Tsd(t=t, d=signal) + epoch = nap.IntervalSet(start=0, end=duration) + osc_ep = nap.filtering.detect_oscillatory_events( + ts, epoch, freq_band, thresh_band, (min_dur, max_dur), min_inter) + + assert len(osc_ep) == 1 # Only 
one event in given freq_band + + # Start and end should be close to actuals +/- a small amount + detected_start = osc_ep.start[0] + detected_end = osc_ep.end[0] + assert np.isclose(start, detected_start, atol=0.05) + assert np.isclose(end, detected_end, atol=0.05) + + # Check we store power, amplitude, and peak_time + for key in ["power", "amplitude", "peak_time"]: + assert key in osc_ep._metadata + + # Check peak_time is within the interval + peak_time = osc_ep._metadata["peak_time"][0] + assert start <= peak_time <= end From d3a25b9cdd4715ec5ac29146ba9177335bb9d18b Mon Sep 17 00:00:00 2001 From: Jacob Date: Sat, 2 Aug 2025 14:44:19 -0400 Subject: [PATCH 106/244] Fix lint issues --- pynapple/process/filtering.py | 14 +++++++------- tests/test_filtering.py | 5 +++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/pynapple/process/filtering.py b/pynapple/process/filtering.py index 384544330..44af76bf6 100644 --- a/pynapple/process/filtering.py +++ b/pynapple/process/filtering.py @@ -548,20 +548,20 @@ def detect_oscillatory_events( window = np.ones(wsize) / wsize nSS = filtfilt(window, 1, squared_signal) - nSS = (nSS - np.mean(nSS))/np.std(nSS) - nSS = nap.Tsd(t = signal.index.values, d=nSS, time_support=epoch) + nSS = (nSS - np.mean(nSS)) / np.std(nSS) + nSS = nap.Tsd(t=signal.index.values, d=nSS, time_support=epoch) # Detect oscillation periods by thresholding normalized signal - nSS2 = nSS.threshold(thresh_band[0], method='above') - nSS3 = nSS2.threshold(thresh_band[1], method='below') + nSS2 = nSS.threshold(thresh_band[0], method="above") + nSS3 = nSS2.threshold(thresh_band[1], method="below") # Exclude oscillation where min_duration < length < max_duration osc_ep = nSS3.time_support - osc_ep = osc_ep.drop_short_intervals(duration_band[0], time_units = 's') - osc_ep = osc_ep.drop_long_intervals(duration_band[1], time_units = 's') + osc_ep = osc_ep.drop_short_intervals(duration_band[0], time_units="s") + osc_ep = 
osc_ep.drop_long_intervals(duration_band[1], time_units="s") # Merge if inter-oscillation period is too short - osc_ep = osc_ep.merge_close_intervals(min_inter_duration, time_units = 's') + osc_ep = osc_ep.merge_close_intervals(min_inter_duration, time_units="s") # Compute power, amplitude, and peak_time for each interval powers = [] diff --git a/tests/test_filtering.py b/tests/test_filtering.py index cbdcace64..d54c44825 100644 --- a/tests/test_filtering.py +++ b/tests/test_filtering.py @@ -481,7 +481,7 @@ def test_compare_sinc_kernel(): ikernel = nap.process.filtering._compute_spectral_inversion(kernel) ikernel2 = kernel2 * -1.0 - ikernel2[len(kernel2) // 2] = 1.0 + ikernel2[len(kernel2) // 2] + ikernel2[len(ikernel2) // 2] = 1.0 + ikernel2[len(kernel2) // 2] np.testing.assert_allclose(ikernel, ikernel2) @@ -555,7 +555,8 @@ def test_detect_oscillatory_events(freq_band, thresh_band, start, end): ts = nap.Tsd(t=t, d=signal) epoch = nap.IntervalSet(start=0, end=duration) osc_ep = nap.filtering.detect_oscillatory_events( - ts, epoch, freq_band, thresh_band, (min_dur, max_dur), min_inter) + ts, epoch, freq_band, thresh_band, (min_dur, max_dur), min_inter + ) assert len(osc_ep) == 1 # Only one event in given freq_band From b57bab02b12a34b2c540293ba99733c919b50d56 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 2 Aug 2025 23:35:08 +0000 Subject: [PATCH 107/244] simpler smoothing code in tutorial --- doc/examples/tutorial_HD_dataset.md | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 63817f069..806dc1156 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -141,15 +141,12 @@ To make the tuning curves look nice, we will smooth them before plotting: ```{code-cell} ipython3 from scipy.ndimage import gaussian_filter1d -tmp = np.concatenate( - [ - tuning_curves.values, - tuning_curves.values, - 
tuning_curves.values - ], - axis=1) -tmp = gaussian_filter1d(tmp, sigma=3, axis=1) -tuning_curves.values = tmp[:, tuning_curves.shape[1]:2*tuning_curves.shape[1]] +tuning_curves.values = gaussian_filter1d( + tuning_curves.values, + sigma=3, + axis=1, + mode="wrap" # important for circular variables! +) ``` What does this look like? Let's plot them! @@ -180,7 +177,7 @@ Now that we have HD tuning curves, we can go one step further. Using only the po We will then compare this to the real head-direction of the animal, and discover that population activity in the ADn indeed codes for HD. To decode the population activity, we will be using a bayesian decoder as implemented in Pynapple. -Again, just a single line of code! +Again, just a single line of code: ```{code-cell} ipython3 decoded, proba_feature = nap.decode_bayes( @@ -197,7 +194,7 @@ What does this look like? print(decoded) ``` -The variable 'decoded' contains the most probable angle, and 'proba_feature' contains the probability of a given angular bin at a given time point: +The variable ``decoded`` contains the most probable angle, and ``proba_feature`` contains the probability of a given angular bin at a given time point: ```{code-cell} ipython3 print(proba_feature) @@ -227,10 +224,10 @@ plt.ylabel("Neurons") plt.show() ``` -From this plot, we can see that the decoder is able to estimate the head-direction based on the population activity in ADn. Amazing! +From this plot, we can see that the decoder is able to estimate the head-direction based on the population activity in ADn. -What does the probability distribution in this example event look like? -Ideally, the bins with the highest probability will correspond to the bins having the most spikes. Let's plot the probability matrix to visualize this. +We can also visualize the probability distribution. +Ideally, the bins with the highest probability correspond to the bins with the most spikes. 
```{code-cell} ipython3 smoothed = scipy.ndimage.gaussian_filter( @@ -270,8 +267,7 @@ plt.show() ``` -From this probability distribution, we observe that the decoded HD closely matches the actual HD. -Hence, the population activity in ADn is a reliable estimate of the heading direction of the animal. +The decoded HD closely matches the actual HD, and thus the population activity in ADn is a reliable estimate of the heading direction of the animal. I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team! From a4c665c6be5025a910a18943b623e0eb57095b20 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 2 Aug 2025 23:39:32 +0000 Subject: [PATCH 108/244] cleaning up tutorial --- doc/examples/tutorial_HD_dataset.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 806dc1156..3f92f2f9c 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -139,9 +139,7 @@ color = xr.DataArray( To make the tuning curves look nice, we will smooth them before plotting: ```{code-cell} ipython3 -from scipy.ndimage import gaussian_filter1d - -tuning_curves.values = gaussian_filter1d( +tuning_curves.values = scipy.ndimage.gaussian_filter1d( tuning_curves.values, sigma=3, axis=1, @@ -232,7 +230,7 @@ Ideally, the bins with the highest probability correspond to the bins with the m ```{code-cell} ipython3 smoothed = scipy.ndimage.gaussian_filter( proba_feature, 1 -) # Smoothening the probability distribution +) # Smoothing the probability distribution # Create a DataFrame with the smoothed distribution p_feature = pd.DataFrame( From 48d37adb100d8bfdf6814672027bd66c82592cdb Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 4 Aug 2025 20:00:48 +0000 Subject: [PATCH 109/244] add decoding to calcium tutorial --- doc/examples/tutorial_calcium_imaging.md | 158 
++++++++++++++++++----- 1 file changed, 128 insertions(+), 30 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index aa215b671..c8f6b35a5 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -92,7 +92,6 @@ As you can see, we have a longer recording for our tracking of the animal's head ```{code-cell} ipython3 transients.time_support -angle.time_support ``` *** @@ -134,21 +133,21 @@ We start by finding the midpoint of the recording, using the function [`get_inte center = transients.time_support.get_intervals_center() halves = nap.IntervalSet( - start = [transients.time_support.start[0], center.t[0]], + start = [transients.time_support.start[0], center.t[0]], end = [center.t[0], transients.time_support.end[0]] - ) +) ``` Now, we can compute the tuning curves for each half of the recording and plot the tuning curves again. ```{code-cell} ipython3 -half1 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[0]]) -half2 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[1]]) +tuning_curves_half1 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[0]]) +tuning_curves_half2 = nap.compute_tuning_curves(transients, angle, bins = 120, epochs = halves.loc[[1]]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) -set_metadata(half1[4]).plot(ax=ax1) +set_metadata(tuning_curves_half1[4]).plot(ax=ax1) ax1.set_title("First half") -set_metadata(half2[4]).plot(ax=ax2) +set_metadata(tuning_curves_half2[4]).plot(ax=ax2) ax2.set_title("Second half") plt.show() ``` @@ -161,7 +160,7 @@ Given some tuning curves, we can also try to decode head direction from the popu For calcium imaging data, Pynapple has `decode_template`, which implements a template matching algorithm. 
```{code-cell} ipython3 -epochs = nap.IntervalSet([10, 100]) +epochs = nap.IntervalSet([50, 150]) decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=transients, @@ -172,34 +171,133 @@ decoded, dist = nap.decode_template( ``` ```{code-cell} ipython3 -fig, (ax1, ax2) = plt.subplots(figsize=(8, 5), nrows=2, ncols=1, sharex=True) -angle=angle.restrict(epochs) -ax1.plot( - angle.times(), - angle.values, - label="True", -) -ax1.scatter( - decoded.times(), - decoded.values, - label="Decoded", - c="orange", -) -ax1.legend( - frameon=False, - bbox_to_anchor=(1.0, 1.0), +:tags: [hide-input] +# normalize distance for better visualization +dist_norm = (dist - np.min(dist.values, axis=1, keepdims=True)) / np.ptp( + dist.values, axis=1, keepdims=True ) + +fig, (ax1, ax2, ax3) = plt.subplots(figsize=(8, 8), nrows=3, ncols=1, sharex=True) +ax1.plot(angle.restrict(epochs), label="True") +ax1.scatter(decoded.times(), decoded.values, label="Decoded", c="orange") +ax1.legend(frameon=False, bbox_to_anchor=(1.0, 1.0)) ax1.set_ylabel("Angle [rad]") -#ax1.set_yticks([0, 2*np.pi], ["0", "2π"]) -im = ax2.imshow(dist.values.T, aspect="auto", origin="lower", cmap="inferno_r", extent=(10.0, 100.0, 0.0, 2*np.pi)) -cbar_ax = fig.add_axes([0.93, 0.1, 0.015, 0.36]) -fig.colorbar(im, cax=cbar_ax, label="Distance") -ax2.set_xlabel("Time (s)", labelpad=-20) + +im = ax2.imshow( + dist.values.T, + aspect="auto", + origin="lower", + cmap="inferno_r", + extent=(epochs.start[0], epochs.end[0], 0.0, 2*np.pi) +) ax2.set_ylabel("Angle [rad]") -#ax2.set_yticks([0, 2*np.pi], ["0", "2π"]) +cbar_ax2 = fig.add_axes([0.95, ax2.get_position().y0, 0.015, ax2.get_position().height]) +fig.colorbar(im, cax=cbar_ax2, label="Distance") + +im = ax3.imshow( + dist_norm.values.T, + aspect="auto", + origin="lower", + cmap="inferno_r", + extent=(epochs.start[0], epochs.end[0], 0.0, 2*np.pi) +) +cbar_ax3 = fig.add_axes([0.95, ax3.get_position().y0, 0.015, ax3.get_position().height]) +fig.colorbar(im, 
cax=cbar_ax3, label="Distance")
+ax3.set_xlabel("Time (s)")
+ax3.set_ylabel("Angle [rad]")
+plt.show()
+```
+
+The distance metric you choose can influence how well we decode.
+Internally, ``decode_template`` uses `scipy.spatial.distance.cdist` to compute the distance matrix,
+you can take a look at [its documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
+to see which metrics are supported, here are a couple examples:
+
+```{code-cell} ipython3
+:tags: [hide-input]
+metrics = [
+    "correlation",
+    "cosine",
+    "euclidean",
+    "braycurtis",
+    "canberra",
+]
+
+fig, axs = plt.subplots(len(metrics), 1, figsize=(8,8), sharex=True)
+for metric, ax in zip(metrics, axs):
+    decoded, dist = nap.decode_template(
+        tuning_curves=tuning_curves,
+        data=transients,
+        bin_size=0.1,
+        metric=metric,
+        epochs=epochs
+    )
+    im = ax.imshow(
+        dist.values.T,
+        aspect="auto",
+        origin="lower",
+        cmap="inferno_r",
+        extent=(epochs.start[0], epochs.end[0], 0.0, 2*np.pi)
+    )
+    plt.colorbar(im, cmap="inferno", label=metric)
+ax.set_xlabel("Time (s)")
+ax.set_ylabel("Angle [rad]")
+plt.show()
+```
+
+We recommend trying out a bunch and seeing which one works best for you.
+In the case of head direction, we can quantify how well we decode using the absolute angular error.
+To get a fair estimate of error, we will compute the tuning curves on the first half of the data
+and compute the error for predictions of the second half.
+ +```{code-cell} ipython3 +def absolute_angular_error(x, y): + return np.abs(np.angle(np.exp(1j * (x - y)))) + +# Compute errors +errors = {} +for metric in metrics: + decoded, dist = nap.decode_template( + tuning_curves=tuning_curves_half1, + data=transients, + bin_size=0.1, + metric=metric, + epochs=halves.loc[[1]] + ) + errors[metric] = absolute_angular_error( + angle.interpolate(decoded).values, decoded.values + ) +``` + +```{code-cell} ipython3 +:tags: [hide-input] +# Visualize +fig, ax = plt.subplots(figsize=(8, 4)) +bp = ax.boxplot( + x=errors.values(), + tick_labels=errors.keys(), + showfliers=False +) +ax.set_ylabel("Angle [rad]") + +# Add median labels +for i, line in enumerate(bp['medians']): + median_y = line.get_ydata()[0] + median_x = line.get_xdata().mean() + ax.text( + median_x, + median_y + 0.1, + f"{median_y:.3f}", + va="center", + ha="center", + fontsize=9, + color="black", + ) plt.show() ``` +In this case, the `braycurtis` distance yields the lowest angular error. + :::{card} Authors ^^^ From fc04c764add34a56175e5453ce78031bc7463338 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 4 Aug 2025 20:10:03 +0000 Subject: [PATCH 110/244] clean docstring --- pynapple/process/decoding.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 886a14011..1c88168d0 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -361,11 +361,8 @@ def decode_template( """ Performs template matching decoding over n-dimensional features. - The algorithm is based on the following steps: - - 1. For every time bin, we compute the distance between the neural activity - and the tuning curves using the chosen distance metric. - 5. For every time bin, the decoded feature bin is the one that lies closest to the tuning curves. + The algorithm computes the distance between the observed neural activity and the tuning curves for every time bin. 
+ The decoded feature at each time bin corresponds to the tuning curve bin with the smallest distance. See:\n Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. From 212278db3017ff0ae460d6c848f8b84b4ab5753e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 4 Aug 2025 20:55:32 +0000 Subject: [PATCH 111/244] sort boxplots + label fix --- doc/examples/tutorial_calcium_imaging.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index c8f6b35a5..be06b75c0 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -202,7 +202,7 @@ im = ax3.imshow( extent=(epochs.start[0], epochs.end[0], 0.0, 2*np.pi) ) cbar_ax3 = fig.add_axes([0.95, ax3.get_position().y0, 0.015, ax3.get_position().height]) -fig.colorbar(im, cax=cbar_ax3, label="Distance") +fig.colorbar(im, cax=cbar_ax3, label="Norm. distance") ax3.set_xlabel("Time (s)") ax3.set_ylabel("Angle [rad]") plt.show() @@ -271,14 +271,16 @@ for metric in metrics: ```{code-cell} ipython3 :tags: [hide-input] -# Visualize +sorted_items = sorted(errors.items(), key=lambda item: np.median(item[1])) +sorted_labels, sorted_values = zip(*sorted_items) + fig, ax = plt.subplots(figsize=(8, 4)) bp = ax.boxplot( - x=errors.values(), - tick_labels=errors.keys(), + x=sorted_values, + tick_labels=sorted_labels, showfliers=False ) -ax.set_ylabel("Angle [rad]") +ax.set_ylabel("Angular error [rad]") # Add median labels for i, line in enumerate(bp['medians']): From cf7cdd3afb93567ea83c48e26440eb49406ad4a5 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 5 Aug 2025 14:50:47 +0000 Subject: [PATCH 112/244] update htmlproofer args --- .github/workflows/main.yml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b88cff142..96456baed 100644 --- a/.github/workflows/main.yml +++ 
b/.github/workflows/main.yml @@ -89,9 +89,19 @@ jobs: uses: chabad360/htmlproofer@master with: directory: "doc/_build/html" - # The directory to scan - arguments: --checks Links,Scripts --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io,./doc/_build/html/_static/,https://www.nature.com/articles/s41593-022-01020-w" --assume-extension --check-external-hash --ignore-status-codes 403 --ignore-files "/.+\/html\/_static\/.+/" - # The arguments to pass to HTMLProofer + arguments: >- + --checks Links,Scripts + --assume-extension + --check-external-hash + --ignore-urls + https://fonts.gstatic.com, + https://mkdocs-gallery.github.io, + ./doc/_build/html/_static/, + https://www.nature.com/articles/s41593-022-01020-w, + https://elifesciences.org/ + --ignore-status-codes 403 + --ignore-files "/.+\/html\/_static\/.+/" + --browser-user-agent "Mozilla/5.0 (X11; Linux x86_64)" check: if: always() From a9b9165a678c71d102cb0c318951750ae5f730f4 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 5 Aug 2025 15:08:03 +0000 Subject: [PATCH 113/244] htmlproofer settings --- .github/workflows/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 96456baed..cdf8f8ce7 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -101,7 +101,6 @@ jobs: https://elifesciences.org/ --ignore-status-codes 403 --ignore-files "/.+\/html\/_static\/.+/" - --browser-user-agent "Mozilla/5.0 (X11; Linux x86_64)" check: if: always() From 2b699f960bcbd842ab8e90c34892c3686b2f2535 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 5 Aug 2025 15:25:03 +0000 Subject: [PATCH 114/244] htmlproofer ignore 406 --- .github/workflows/main.yml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cdf8f8ce7..d0374bf55 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -89,18 +89,15 @@ jobs: uses: 
chabad360/htmlproofer@master with: directory: "doc/_build/html" - arguments: >- - --checks Links,Scripts - --assume-extension - --check-external-hash - --ignore-urls - https://fonts.gstatic.com, - https://mkdocs-gallery.github.io, - ./doc/_build/html/_static/, - https://www.nature.com/articles/s41593-022-01020-w, - https://elifesciences.org/ - --ignore-status-codes 403 + # The directory to scan + arguments: + --checks Links,Scripts + --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io,./doc/_build/html/_static/,https://www.nature.com/articles/s41593-022-01020-w" + --assume-extension + --check-external-hash + --ignore-status-codes 403, 406 --ignore-files "/.+\/html\/_static\/.+/" + # The arguments to pass to HTMLProofer check: if: always() From 981bf4b60bc6444e15904809ead473d40e9b154b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 5 Aug 2025 16:13:14 +0000 Subject: [PATCH 115/244] add url to htmlproofer ignore --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d0374bf55..33e645193 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -92,10 +92,10 @@ jobs: # The directory to scan arguments: --checks Links,Scripts - --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io,./doc/_build/html/_static/,https://www.nature.com/articles/s41593-022-01020-w" + --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io,./doc/_build/html/_static/,https://www.nature.com/articles/s41593-022-01020-w,https://elifesciences.org/reviewed-preprints/85786" --assume-extension --check-external-hash - --ignore-status-codes 403, 406 + --ignore-status-codes 403 --ignore-files "/.+\/html\/_static\/.+/" # The arguments to pass to HTMLProofer From 30d8b669bf590f82b49f55d828ea5a895c7d32fa Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 6 Aug 2025 16:41:07 +0000 Subject: [PATCH 116/244] add prior to 
template decoding + update docstring --- pynapple/process/decoding.py | 74 ++++++++++++++++++++++++++---------- tests/test_decoding.py | 4 +- 2 files changed, 57 insertions(+), 21 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 1c88168d0..6a2ee0ef3 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -60,28 +60,28 @@ def wrapper(*args, **kwargs): return wrapper -def _format_decoding_outputs(p, tuning_curves, data, epochs, greater_is_better): +def _format_decoding_outputs(dist, tuning_curves, data, epochs, greater_is_better): # Get the index of the decoded class filler = -np.inf if greater_is_better else np.inf - filled = np.where(np.isnan(p), filler, p) + filled = np.where(np.isnan(dist), filler, dist) idx = getattr(np, "argmax" if greater_is_better else "argmin")(filled, axis=1) # Replace with -1 where all values were NaN - all_nan = np.isnan(p).all(axis=1) + all_nan = np.isnan(dist).all(axis=1) idx[all_nan] = -1 # Format probability distribution - p = p.reshape(p.shape[0], *tuning_curves.shape[1:]) - if p.ndim > 2: - p = nap.TsdTensor( + dist = dist.reshape(dist.shape[0], *tuning_curves.shape[1:]) + if dist.ndim > 2: + dist = nap.TsdTensor( t=data.index, - d=p, + d=dist, time_support=epochs, ) else: - p = nap.TsdFrame( + dist = nap.TsdFrame( t=data.index, - d=p, + d=dist, time_support=epochs, columns=tuning_curves.coords[tuning_curves.dims[1]].values, ) @@ -116,7 +116,7 @@ def _format_decoding_outputs(p, tuning_curves, data, epochs, greater_is_better): columns=tuning_curves.dims[1:], ) - return decoded, p + return decoded, dist @_format_decoding_inputs @@ -166,7 +166,7 @@ def decode_bayes( Parameters ---------- tuning_curves : xr.DataArray - Tuning curves as outputed by `compute_tuning_curves` (one for each unit). + Tuning curves as outputed by `compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. 
You may also pass a TsdFrame with smoothed counts.
@@ -356,11 +356,30 @@ decode_bayes
 
 @_format_decoding_inputs
 def decode_template(
-    tuning_curves, data, epochs, bin_size, metric="correlation", time_units="s"
+    tuning_curves,
+    data,
+    epochs,
+    bin_size,
+    metric="correlation",
+    time_units="s",
+    uniform_prior=True,
 ):
     """
     Performs template matching decoding over n-dimensional features.
 
+    The algorithm decodes as follow:
+
+    .. math::
+
+        \\hat{x}(t) = \\arg\\max\\limits_{x} dist(P(x)*f(x), n(t))
+
+    where:
+
+    - :math:`f(x)` is the the tuning curve function.
+    - :math:`P(x)` is the prior, which can be uniform or based on the occupancy distribution.
+    - :math:`n(t)` is input neural activity at time :math:`t`.
+    - :math:`dist` is a distance metric.
+
     The algorithm computes the distance between the observed neural activity and the tuning curves for every time bin.
     The decoded feature at each time bin corresponds to the tuning curve bin with the smallest distance.
 
@@ -371,10 +390,13 @@
         hippocampal place cells. Journal of neurophysiology, 79(2),
         1017-1044.
 
+    See ``scipy.spatial.distance.cdist`` for available distance metrics and how they are computed:
+    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html
+
     Parameters
     ----------
     tuning_curves : xr.DataArray
-        Tuning curves as outputed by `compute_tuning_curves` (one for each unit).
+        Tuning curves as outputted by `compute_tuning_curves`.
     data : TsGroup or TsdFrame
         Neural activity with the same keys as the tuning curves.
         You may also pass a TsdFrame with smoothed counts.
@@ -384,15 +406,22 @@
         Bin size. Default is second. Use the parameter time_units to change it.
     metric : str or callable, optional
         The distance metric to use for template matching.
-        This is passed to `scipy.spatial.distance.cdist`,
+        This is passed to `scipy.spatial.distance.cdist`.
If a string, the distance function can be ‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘cityblock’, ‘correlation’, ‘cosine’, ‘dice’, ‘euclidean’, ‘hamming’, ‘jaccard’, ‘jensenshannon’, ‘kulczynski1’, ‘mahalanobis’, ‘matching’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’. Default is 'correlation'. + + .. note:: + Some metrics may not be suitable for all types of data. + For example, if your tuning curves contain NaN values, you should not use 'hamming', as it does not handle NaNs. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). + uniform_prior : bool, optional + If True (default), uses a uniform distribution as a prior. + If False, scales the tuning curves using the occupancy distribution before computing distances. Returns ------- @@ -411,7 +440,7 @@ def decode_template( >>> feature = nap.Tsd(t=np.arange(0, 100, 1), d=np.repeat(np.arange(0, 2), 50)) >>> tuning_curves = nap.compute_tuning_curves(group, feature, bins=2, range=(-.5, 1.5)) >>> epochs = nap.IntervalSet([0, 100]) - >>> decoded, p = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, dist = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) ---------- -- @@ -452,7 +481,7 @@ def decode_template( 99.5 2.0 0.0 dtype: float64, shape: (100, 2) - p is a `TsdFrame` object containing the probability distribution for each time bin. + dist is a `TsdFrame` object containing the distances for each time bin. The function also works for multiple features, in which case it does n-dimensional decoding: @@ -469,7 +498,7 @@ def decode_template( ... } ... 
) >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) - >>> decoded, p = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, dist = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- @@ -492,7 +521,7 @@ def decode_template( decoded is now a `TsdFrame` object containing the decoded features for each time bin. - >>> p + >>> dist Time (s) ---------- -------------------------- 0.5 [[0. , 1.333333] ...] @@ -510,14 +539,14 @@ def decode_template( 99.5 [[1.333333, 1.333333] ...] dtype: float64, shape: (100, 2, 2) - and p is a `TsdTensor` object containing the probability distribution for each time bin. + and dist is a `TsdTensor` object containing the distances for each time bin. It is also possible to pass continuous values instead of spikes (e.g. calcium imaging): >>> time = np.arange(0,100, 0.1) >>> group = nap.TsdFrame(t=time, d=np.stack([time % 0.5, time %1], axis=1)) >>> tuning_curves = nap.compute_tuning_curves(group, features, bins=2, range=[(-.5, 1.5)]*2) - >>> decoded, p = nap.decode_bayes(tuning_curves, group, epochs=epochs, bin_size=1) + >>> decoded, dist = nap.decode_template(tuning_curves, group, epochs=epochs, bin_size=1) >>> decoded Time (s) 0 1 ---------- --- --- @@ -541,6 +570,11 @@ def decode_template( tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T ct = data.values + if not uniform_prior: + occupancy = tuning_curves.attrs["occupancy"].flatten() + occupancy /= occupancy.sum() + tc *= occupancy + return _format_decoding_outputs( cdist(ct, tc, metric=metric), tuning_curves, diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 08e83e1e1..892f08920 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -230,9 +230,10 @@ def test_decode_bayes(n_features, binned, uniform_prior): @pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) 
+@pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def test_decode_template(n_features, binned, metric): +def test_decode_template(n_features, binned, metric, uniform_prior): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( n_features, binned=binned ).values() @@ -243,6 +244,7 @@ def test_decode_template(n_features, binned, metric): metric=metric, bin_size=bin_size, time_units="s", + uniform_prior=uniform_prior, ) assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) From f659dd3b5ab8496b7839b8834031bd099875201c Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Wed, 6 Aug 2025 20:08:03 +0000 Subject: [PATCH 117/244] better docstring, remove uniform_prior --- pynapple/process/decoding.py | 16 +++------------- tests/test_decoding.py | 4 +--- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 6a2ee0ef3..bf360f3ec 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -362,7 +362,6 @@ def decode_template( bin_size, metric="correlation", time_units="s", - uniform_prior=True, ): """ Performs template matching decoding over n-dimensional features. @@ -371,12 +370,11 @@ def decode_template( .. math:: - \\hat{x}(t) = \\arg\\max\\limits_{x} dist(P(x)*f(x), n(t)) + \\hat{x}(t) = \\arg\\max\\limits_{x} dist(f(x), n(t)) where: - :math:`f(x)` is the the tuning curve function. - - :math:`P(x)` is the prior, which can be uniform or based on the occupancy distribution. - :math:`n(t)` is input neural activity at time :math:`t`. - :math:`dist` is a distance metric. @@ -419,9 +417,6 @@ def decode_template( For example, if your tuning curves contain NaN values, you should not use 'hamming', as it does not handle NaNs. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). 
- uniform_prior : bool, optional - If True (default), uses a uniform distribution as a prior. - If False, scales the tuning curves using the occupancy distribution before computing distances. Returns ------- @@ -567,16 +562,11 @@ def decode_template( 99.9 1.0 1.0 dtype: float64, shape: (1000, 2) """ - tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1).T + tc = tuning_curves.values.reshape(tuning_curves.sizes["unit"], -1) ct = data.values - if not uniform_prior: - occupancy = tuning_curves.attrs["occupancy"].flatten() - occupancy /= occupancy.sum() - tc *= occupancy - return _format_decoding_outputs( - cdist(ct, tc, metric=metric), + cdist(ct, tc.T, metric=metric), tuning_curves, data, epochs, diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 892f08920..08e83e1e1 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -230,10 +230,9 @@ def test_decode_bayes(n_features, binned, uniform_prior): @pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) -@pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def test_decode_template(n_features, binned, metric, uniform_prior): +def test_decode_template(n_features, binned, metric): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( n_features, binned=binned ).values() @@ -244,7 +243,6 @@ def test_decode_template(n_features, binned, metric, uniform_prior): metric=metric, bin_size=bin_size, time_units="s", - uniform_prior=uniform_prior, ) assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) From 66cee66ce448f289281c3902e54c92ba81f51b51 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 7 Aug 2025 15:51:50 +0000 Subject: [PATCH 118/244] more metrics in calcium tutorial --- doc/examples/tutorial_calcium_imaging.md | 50 +++++++++++++++++------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git 
a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index be06b75c0..b34fbae2e 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -166,7 +166,7 @@ decoded, dist = nap.decode_template( data=transients, epochs=epochs, bin_size=0.1, - metric="correlation" + metric="correlation", ) ``` @@ -216,30 +216,49 @@ to see which metrics are supported, here are a couple examples: ```{code-cell} ipython3 :tags: [hide-input] metrics = [ - "correlation", - "cosine", - "euclidean", - "braycurtis", - "canberra", + "braycurtis", + "canberra", + "chebyshev", + "cityblock", + "correlation", + "cosine", + "dice", + "euclidean", + "jensenshannon", + "mahalanobis", + "minkowski", + "seuclidean", + "sqeuclidean", ] -fig, axs = plt.subplots(len(metrics), 1, figsize=(8,8), sharex=True) -for metric, ax in zip(metrics, axs): +fig, axs = plt.subplots(len(metrics), 1, figsize=(8,32), sharex=True, sharey=True) +for metric, ax in zip(metrics, axs.flatten()): decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=transients, bin_size=0.1, metric=metric, - epochs=epochs + epochs=epochs, + ) + # normalize distance for better visualization + dist_norm = (dist - np.min(dist.values, axis=1, keepdims=True)) / np.ptp( + dist.values, axis=1, keepdims=True ) im = ax.imshow( - dist.values.T, + dist_norm.values.T, aspect="auto", origin="lower", cmap="inferno_r", extent=(epochs.start[0], epochs.end[0], 0.0, 2*np.pi) ) - plt.colorbar(im, cmap="inferno", label=metric) + if metric != metrics[-1]: + ax.spines['bottom'].set_visible(False) + ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False) + ax.set_yticks([]) + ax.spines['left'].set_visible(False) + ax.set_title(metric) +cbar_ax = fig.add_axes([0.95, ax.get_position().y0, 0.015, ax.get_position().height]) +fig.colorbar(im, cax=cbar_ax, label="Norm. 
distance", cmap="inferno_r") ax.set_xlabel("Time (s)") ax.set_ylabel("Angle [rad]") plt.show() @@ -262,7 +281,7 @@ for metric in metrics: data=transients, bin_size=0.1, metric=metric, - epochs=halves.loc[[1]] + epochs=halves.loc[[1]], ) errors[metric] = absolute_angular_error( angle.interpolate(decoded).values, decoded.values @@ -281,6 +300,7 @@ bp = ax.boxplot( showfliers=False ) ax.set_ylabel("Angular error [rad]") +plt.setp(ax.get_xticklabels(), rotation=90) # Add median labels for i, line in enumerate(bp['medians']): @@ -289,16 +309,16 @@ for i, line in enumerate(bp['medians']): ax.text( median_x, median_y + 0.1, - f"{median_y:.3f}", + f"{median_y:.2f}", va="center", ha="center", - fontsize=9, + fontsize=7, color="black", ) plt.show() ``` -In this case, the `braycurtis` distance yields the lowest angular error. +In this case, the `jensenshannon` distance yields the lowest angular error. :::{card} Authors From ff46170e5e4a189c2dbf59470bbf1a6b778a0578 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 7 Aug 2025 16:38:34 +0000 Subject: [PATCH 119/244] horizontal plot + violin --- doc/examples/tutorial_calcium_imaging.md | 32 ++++++++---------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index b34fbae2e..6d053c712 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -293,32 +293,20 @@ for metric in metrics: sorted_items = sorted(errors.items(), key=lambda item: np.median(item[1])) sorted_labels, sorted_values = zip(*sorted_items) -fig, ax = plt.subplots(figsize=(8, 4)) -bp = ax.boxplot( - x=sorted_values, - tick_labels=sorted_labels, - showfliers=False +fig, ax = plt.subplots(figsize=(8, 8)) +vp = ax.violinplot( + dataset=sorted_values, + showmeans=False, + showmedians=True, + vert=False # Make it horizontal ) -ax.set_ylabel("Angular error [rad]") -plt.setp(ax.get_xticklabels(), rotation=90) - -# Add 
median labels -for i, line in enumerate(bp['medians']): - median_y = line.get_ydata()[0] - median_x = line.get_xdata().mean() - ax.text( - median_x, - median_y + 0.1, - f"{median_y:.2f}", - va="center", - ha="center", - fontsize=7, - color="black", - ) +ax.set_yticks(range(1, len(sorted_labels) + 1)) +ax.set_yticklabels(sorted_labels) +ax.set_xlabel("Angular error [rad]") plt.show() ``` -In this case, the `jensenshannon` distance yields the lowest angular error. +In this case, `jensenshannon` yields the lowest angular error. :::{card} Authors From 875e83528c4939456b231492ebd7fb22e3407897 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 7 Aug 2025 17:54:01 +0000 Subject: [PATCH 120/244] further updates in calcium tutorial --- doc/examples/tutorial_calcium_imaging.md | 41 ++++++++++++------------ 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 6d053c712..6aa10fc78 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -216,23 +216,23 @@ to see which metrics are supported, here are a couple examples: ```{code-cell} ipython3 :tags: [hide-input] metrics = [ - "braycurtis", - "canberra", "chebyshev", - "cityblock", - "correlation", - "cosine", "dice", + "canberra", + "sqeuclidean", + "minkowski", "euclidean", - "jensenshannon", + "cityblock", "mahalanobis", - "minkowski", + "correlation", + "cosine", "seuclidean", - "sqeuclidean", + "braycurtis", + "jensenshannon", ] -fig, axs = plt.subplots(len(metrics), 1, figsize=(8,32), sharex=True, sharey=True) -for metric, ax in zip(metrics, axs.flatten()): +fig, axs = plt.subplots(5, 1, figsize=(8,12), sharex=True, sharey=True) +for metric, ax in zip(metrics[-5:], axs.flatten()): decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=transients, @@ -244,6 +244,7 @@ for metric, ax in zip(metrics, axs.flatten()): dist_norm = (dist - np.min(dist.values, axis=1, 
keepdims=True)) / np.ptp( dist.values, axis=1, keepdims=True ) + ax.plot(angle.restrict(epochs), label="True") im = ax.imshow( dist_norm.values.T, aspect="auto", @@ -256,11 +257,11 @@ for metric, ax in zip(metrics, axs.flatten()): ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False) ax.set_yticks([]) ax.spines['left'].set_visible(False) - ax.set_title(metric) -cbar_ax = fig.add_axes([0.95, ax.get_position().y0, 0.015, ax.get_position().height]) -fig.colorbar(im, cax=cbar_ax, label="Norm. distance", cmap="inferno_r") + ax.set_ylabel(metric) +cbar_ax = fig.add_axes([0.92, ax.get_position().y0, 0.015, ax.get_position().height]) +cbar=fig.colorbar(im, cax=cbar_ax) +cbar.set_label("Norm. distance") ax.set_xlabel("Time (s)") -ax.set_ylabel("Angle [rad]") plt.show() ``` @@ -294,14 +295,12 @@ sorted_items = sorted(errors.items(), key=lambda item: np.median(item[1])) sorted_labels, sorted_values = zip(*sorted_items) fig, ax = plt.subplots(figsize=(8, 8)) -vp = ax.violinplot( - dataset=sorted_values, - showmeans=False, - showmedians=True, - vert=False # Make it horizontal +bp = ax.boxplot( + x=sorted_values, + tick_labels=sorted_labels, + vert=False, + showfliers=False ) -ax.set_yticks(range(1, len(sorted_labels) + 1)) -ax.set_yticklabels(sorted_labels) ax.set_xlabel("Angular error [rad]") plt.show() ``` From e0753bfcd05f60d151e28d2b4480c191a9fd1da2 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 7 Aug 2025 18:17:28 +0000 Subject: [PATCH 121/244] typos + ref to tutorial in user guide --- doc/user_guide/07_decoding.md | 3 +++ pynapple/process/decoding.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 2d836994c..94e35dac1 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -443,3 +443,6 @@ ax3.scatter( ax3.set_title("Combined") plt.show() ``` + +Take a look at the [tutorial on calcium 
imaging](../examples/tutorial_calcium_imaging.md) +for an application of template matching with real data and a comparison of various distance metrics! diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index bf360f3ec..63c14be22 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -370,7 +370,7 @@ def decode_template( .. math:: - \\hat{x}(t) = \\arg\\max\\limits_{x} dist(f(x), n(t)) + \\hat{x}(t) = \\arg\\min\\limits_{x} [dist(f(x), n(t))] where: From b0ceb529d33a376f1180ba1b88e3b4ab349b8bfa Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Thu, 14 Aug 2025 13:16:40 +0000 Subject: [PATCH 122/244] Update doc/examples/tutorial_HD_dataset.md Co-authored-by: Sarah Jo Venditto --- doc/examples/tutorial_HD_dataset.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 3f92f2f9c..034c028f9 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -265,7 +265,7 @@ plt.show() ``` -The decoded HD closely matches the actual HD, and thus the population activity in ADn is a reliable estimate of the heading direction of the animal. +The decoded HD (dashed grey line) closely matches the actual HD (solid white line), and thus the population activity in ADn is a reliable estimate of the heading direction of the animal. I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team! 
From ca973e161b910cd57a809149ac511836ed4cb264 Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Thu, 14 Aug 2025 13:16:50 +0000 Subject: [PATCH 123/244] Update doc/examples/tutorial_calcium_imaging.md Co-authored-by: Sarah Jo Venditto --- doc/examples/tutorial_calcium_imaging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 6aa10fc78..c0d969499 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -156,7 +156,7 @@ plt.show() Calcium decoding --------------------- -Given some tuning curves, we can also try to decode head direction from the population. +Given some tuning curves, we can also try to decode head direction from the population activity. For calcium imaging data, Pynapple has `decode_template`, which implements a template matching algorithm. ```{code-cell} ipython3 From f9cf0dfb3136294b08c4503d99b7baf9de241c83 Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Thu, 14 Aug 2025 13:17:03 +0000 Subject: [PATCH 124/244] Update doc/examples/tutorial_calcium_imaging.md Co-authored-by: Sarah Jo Venditto --- doc/examples/tutorial_calcium_imaging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index c0d969499..1d8377c9c 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -209,7 +209,7 @@ plt.show() ``` The distance metric you choose can influence how well we decode. 
-Internally, ``decode_template`` uses `scipy.spatial.distance.cdist` to compute the distance matrix, +Internally, ``decode_template`` uses `scipy.spatial.distance.cdist` to compute the distance matrix; you can take a look at [its documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) to see which metrics are supported, here are a couple examples: From 917fd6fb50460a077c0680b0c5ae7d053ed1ea3b Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Thu, 14 Aug 2025 13:17:14 +0000 Subject: [PATCH 125/244] Update doc/examples/tutorial_calcium_imaging.md Co-authored-by: Sarah Jo Venditto --- doc/examples/tutorial_calcium_imaging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 1d8377c9c..c037194e5 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -211,7 +211,7 @@ plt.show() The distance metric you choose can influence how well we decode. Internally, ``decode_template`` uses `scipy.spatial.distance.cdist` to compute the distance matrix; you can take a look at [its documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) -to see which metrics are supported, here are a couple examples: +to see which metrics are supported. 
Here are a couple examples: ```{code-cell} ipython3 :tags: [hide-input] From 45f38fef1fc542adcba85b47cfbc6f5eab9ca9c3 Mon Sep 17 00:00:00 2001 From: Wolf De Wulf Date: Thu, 14 Aug 2025 13:17:24 +0000 Subject: [PATCH 126/244] Update doc/examples/tutorial_calcium_imaging.md Co-authored-by: Sarah Jo Venditto --- doc/examples/tutorial_calcium_imaging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index c037194e5..243176b0d 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -265,7 +265,7 @@ ax.set_xlabel("Time (s)") plt.show() ``` -We recommend to try out a bunch and see which one works best for you. +We recommend trying a bunch to see which one works best for you. In the case of head direction, we can quantify how well we decode using the absolute angular error. To get a fair estimate of error, we will compute the tuning curves on the first half of the data and compute the error for predictions of the second half. 
From eaa946e3044b23bab28f61b41e541fc905475e5a Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 14 Aug 2025 13:28:35 +0000 Subject: [PATCH 127/244] rename calcium tutorial --- doc/examples.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples.md b/doc/examples.md index 42f540b3e..d9202c226 100644 --- a/doc/examples.md +++ b/doc/examples.md @@ -24,7 +24,7 @@ Streaming data from Dandi :::{card} ```{toctree} :maxdepth: 3 -Computing calcium imaging tuning curves +Analyzing calcium imaging data ``` ::: From de8ec01d90b4b51c6739bbd25419f7681950bf50 Mon Sep 17 00:00:00 2001 From: gviejo Date: Wed, 20 Aug 2025 13:14:18 -0400 Subject: [PATCH 128/244] Changing some text --- doc/user_guide/07_decoding.md | 4 ++-- pynapple/process/decoding.py | 8 ++++---- tests/test_decoding.py | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 94e35dac1..d86f3ad67 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -30,9 +30,9 @@ For any other type of data (and also for spike data), you can use [`decode_templ Input to both decoding functions always includes: - `tuning_curves`, computed using [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). - - `data`, neural activity as a `TsGroup` (spikes) or `TsdFrame` (smoothed counts). + - `data`, neural activity as a `TsGroup` (spikes) or `TsdFrame` (smoothed counts or calcium activity or any other time series). - `epochs`, to restrict decoding to certain intervals. - - `bin_size`, the size of the bins in which to count the data (spikes only). + - `bin_size`, the size of the bins in which to count timestamps when data is a `TsGroup` object. - `time_units`, the units of `bin_size`, defaulting to seconds. 
## Bayesian decoding diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 63c14be22..f0461b68e 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -24,7 +24,7 @@ def wrapper(*args, **kwargs): tuning_curves = kwargs["tuning_curves"] if not isinstance(tuning_curves, xr.DataArray): raise TypeError( - "tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves." + "tuning_curves should be an xr.DataArray as computed by compute_tuning_curves." ) # check data @@ -166,7 +166,7 @@ def decode_bayes( Parameters ---------- tuning_curves : xr.DataArray - Tuning curves as outputed by `compute_tuning_curves`. + Tuning curves as computed by `compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed counts. @@ -186,7 +186,7 @@ def decode_bayes( Tsd The decoded feature. TsdFrame, TsdTensor - The probability distribution of the decoded trajectory for each time bin. + The probability distribution of the decoded feature for each time bin. Examples -------- @@ -394,7 +394,7 @@ def decode_template( Parameters ---------- tuning_curves : xr.DataArray - Tuning curves as outputed by `compute_tuning_curves`. + Tuning curves as computed by `compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed counts. 
diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 08e83e1e1..80fc16b7d 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -55,21 +55,21 @@ def get_testing_set_n(n_features=1, binned=False): {"tuning_curves": []}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", ), ), ( {"tuning_curves": 1}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", ), ), ( {"tuning_curves": get_testing_set_n()["tuning_curves"].to_pandas().T}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as outputed by compute_tuning_curves.", + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", ), ), ( From dbe174631e52ae8ac503b9ae6b32fca9ca6aaf01 Mon Sep 17 00:00:00 2001 From: gviejo Date: Wed, 20 Aug 2025 13:16:35 -0400 Subject: [PATCH 129/244] Removing test.py --- test.py | 230 -------------------------------------------------------- 1 file changed, 230 deletions(-) delete mode 100644 test.py diff --git a/test.py b/test.py deleted file mode 100644 index 45048cfb0..000000000 --- a/test.py +++ /dev/null @@ -1,230 +0,0 @@ -import pynapple as nap -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -import xarray as xr -from scipy.ndimage import gaussian_filter1d -import pandas as pd - -SMALL_SIZE = 8 -MEDIUM_SIZE = 10 -BIGGER_SIZE = 12 - -plt.rc("font", size=SMALL_SIZE) # controls default text sizes -plt.rc("axes", titlesize=SMALL_SIZE) # fontsize of the axes title -plt.rc("axes", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels -plt.rc("xtick", labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc("ytick", labelsize=SMALL_SIZE) # fontsize of the tick labels 
-plt.rc("legend", fontsize=SMALL_SIZE) # legend fontsize -plt.rc("figure", titlesize=BIGGER_SIZE) # fontsize of the figure title - - -custom_params = {"axes.spines.right": False, "axes.spines.top": False} -sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params) -xr.set_options(display_expand_attrs=False) -group = { - 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))), - 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))), - 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))), -} -tsgroup = nap.TsGroup(group) -dt = 0.01 -T = 10 -epoch = nap.IntervalSet(start=0, end=T, time_units="s") -features = np.vstack((np.cos(np.arange(0, T, dt)), np.sin(np.arange(0, T, dt)))).T -features = nap.TsdFrame( - t=np.arange(0, T, dt), - d=features, - time_units="s", - time_support=epoch, - columns=["x", "y"], -) - - -# Calcium activity -ft = features.values -alpha = np.arctan2(ft[:, 1], ft[:, 0]) -bin_centers = np.linspace(-np.pi, np.pi, 6) -kappa = 4.0 -units = [] -for i, mu in enumerate(bin_centers): - units.append(np.exp(kappa * np.cos(alpha - mu))) # wrapped Gaussian -units = np.stack(units, axis=1) -tsdframe = nap.TsdFrame(t=features.times(), d=units) -tuning_curves_2d = nap.compute_tuning_curves( - data=tsdframe, features=features, bins=9, feature_names=["x", "y"] -) -tuning_curves_2d -tuning_curves_2d.name = "ΔF/F" -tuning_curves_2d.attrs["unit"] = "a.u." 
-g = tuning_curves_2d.plot( - col="unit", - col_wrap=3, - figsize=(8, 5), - add_colorbar=False, # IMPORTANT: don't add colorbar yet -) -g.fig.set_constrained_layout(True) -mappable = g.axs.flat[0].collections[0] # likely for pcolormesh or contourf - -# Add shared colorbar -cbar = g.fig.colorbar(mappable, ax=g.axs.ravel().tolist(), location="right", shrink=0.8) -cbar.set_label("ΔF/F [a.u.]") -plt.xticks([-1, 0, 1]) -plt.yticks([-1, 0, 1]) -plt.savefig("tuning_curves_2d.pdf", dpi=300) -plt.close() - - -dt = 0.1 -epochs = nap.IntervalSet(start=0, end=1000, time_units="s") -features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T -features = nap.TsdFrame( - t=np.arange(0, 1000, dt), - d=features, - time_units="s", - time_support=epochs, - columns=["x", "y"], -) - -times = features.as_units("us").index.values -ft = features.values -alpha = np.arctan2(ft[:, 1], ft[:, 0]) -bin_centers = np.linspace(-np.pi, np.pi, 12) -kappa = 4.0 -ts_group = {} -for i, mu in enumerate(bin_centers): - weights = np.exp(kappa * np.cos(alpha - mu)) # wrapped Gaussian - weights /= np.max(weights) # normalize to 0–1 - mask = weights > 0.5 - ts = times[mask] - ts_group[i] = nap.Ts(ts, time_units="us") -ts_group = nap.TsGroup(ts_group) - -tuning_curves_2d = nap.compute_tuning_curves( - data=ts_group, - features=features, # containing 2 features - bins=9, - epochs=epochs, - range=[(-1.0, 1.0), (-1.0, 1.0)], # range can be specified for each feature -) -decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_2d, - data=ts_group, - epochs=epochs, - bin_size=0.2, -) -fig, (ax1, ax2, ax3) = plt.subplots( - figsize=(8, 3.5), nrows=1, ncols=3, sharey=True, layout="constrained" -) -ax1.plot(features["x"].get(0, 20), label="True") -ax1.scatter( - decoded["x"].get(0, 20).times(), - decoded["x"].get(0, 20), - label="Decoded", - c="orange", -) -ax1.set_title("x") -ax1.set_xlabel("Time (s)") - -ax2.plot(features["y"].get(0, 20), label="True") -ax2.scatter( - 
decoded["y"].get(0, 20).times(), - decoded["y"].get(0, 20), - label="Decoded", - c="orange", -) -ax2.set_xlabel("Time (s)") -ax2.set_title("y") - -ax3.plot( - features["x"].get(0, 20), - features["y"].get(0, 20), - label="True", -) -ax3.scatter( - decoded["x"].get(0, 20), - decoded["y"].get(0, 20), - label="Decoded", - c="orange", -) -ax3.set_title("Combined") -plt.savefig("decode_template_2d.pdf", dpi=300) -plt.close() - - -# Fake Tuning curves -N = 6 # Number of neurons -bins = np.linspace(0, 2 * np.pi, 61) -x = np.linspace(-np.pi, np.pi, len(bins) - 1) -tmp = np.roll(np.exp(-((1.5 * x) ** 2)), (len(bins) - 1) // 2) -tc = np.array([np.roll(tmp, i * (len(bins) - 1) // N) for i in range(N)]).T - -tc_1d = pd.DataFrame(index=bins[0:-1], data=tc) - -# Feature -T = 10000 -dt = 0.01 -timestep = np.arange(0, T) * dt -feature = nap.Tsd( - t=timestep, - d=gaussian_filter1d(np.cumsum(np.random.randn(T) * 0.5), 20) % (2 * np.pi), -) -index = np.digitize(feature, bins) - 1 - -# Spiking activity - -count = np.random.poisson(tc[index]) > 0 -tsgroup = nap.TsGroup({i: nap.Ts(timestep[count[:, i]]) for i in range(N)}) -epochs = nap.IntervalSet(0, 10) -tuning_curves_1d = nap.compute_tuning_curves( - tsgroup, feature, bins=61, range=(0, 2 * np.pi), feature_names=["Circular feature"] -) - -fig, ax = plt.subplots(figsize=(8, 3), layout="constrained") -tuning_curves_1d.name = "Firing rate" -tuning_curves_1d.attrs["unit"] = "Hz" -tuning_curves_1d.coords["Circular feature"].attrs["unit"] = "rad" -tuning_curves_1d.plot.line( - ax=ax, - x="Circular feature", - add_legend=False, -) -plt.xticks([0, 2 * np.pi], ["0", "2π"]) -plt.xlabel("Circular feature [rad]", labelpad=-16) -plt.savefig("tuning_curves_1d.pdf", dpi=300) - -decoded, proba_feature = nap.decode_bayes( - tuning_curves=tuning_curves_1d, - data=tsgroup, - epochs=epochs, - bin_size=0.06, -) -fig, (ax1, ax2) = plt.subplots( - figsize=(8, 4), nrows=2, ncols=1, sharex=True, layout="constrained" -) -ax1.plot( - np.linspace(0, 
len(decoded), len(feature.restrict(epochs))), - feature.restrict(epochs), - label="True", -) -ax1.scatter( - np.linspace(0, len(decoded), len(decoded)), - decoded, - label="Decoded", - c="orange", -) -ax1.legend( - frameon=False, - bbox_to_anchor=(1.0, 1.0), -) -ax1.set_xlim(epochs[0, 0], epochs[0, 1]) -im = ax2.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis") -cbar_ax = fig.add_axes([0.8, 0.1, 0.015, 0.41]) -fig.colorbar(im, cax=cbar_ax, label="Probability") -ax2.set_xticks([0, len(decoded)], epochs.values[0]) -ax2.set_yticks([0, proba_feature.shape[1] - 1], ["0", "2π"]) -ax1.set_yticks([0, 2 * np.pi], ["0", "2π"]) -ax1.set_ylabel("Circular\nfeature [rad]") -ax2.set_xlabel("Time (s)", labelpad=-20) -ax2.set_ylabel("Circular\nfeature [rad]") -plt.savefig("decode_bayes_1d.pdf", dpi=300) From 84fc65805dfe55fef626d6ed583fccce2701ed00 Mon Sep 17 00:00:00 2001 From: Jacob Date: Sat, 23 Aug 2025 13:31:21 -0400 Subject: [PATCH 130/244] Update detect_oscillatory_events --- pynapple/process/filtering.py | 28 ++++++++++++++++++++-------- tests/test_filtering.py | 34 ++++++++++++++++++---------------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/pynapple/process/filtering.py b/pynapple/process/filtering.py index 44af76bf6..68add1260 100644 --- a/pynapple/process/filtering.py +++ b/pynapple/process/filtering.py @@ -513,15 +513,22 @@ def get_filter_frequency_response( def detect_oscillatory_events( - lfp, epoch, freq_band, thresh_band, duration_band, min_inter_duration, wsize=51 + data, + epoch, + freq_band, + thresh_band, + duration_band, + min_inter_duration, + fs=None, + wsize=51, ): """ Simple helper for detecting oscillatory events (e.g. 
ripples, spindles) Parameters ---------- - lfp : Tsd - Should be a single channel raw lfp + data : Tsd + 1-dimensional time series epoch : IntervalSet The epoch for restricting the detection freq_band : tuple @@ -532,8 +539,10 @@ def detect_oscillatory_events( The (min, max) duration of an event in second min_inter_duration : float The minimum duration between two events otherwise they are merged (in seconds) + fs : float, optional + The sampling frequency of the signal in Hz. If not provided, it will be inferred from the time axis of the data. wsize : int, optional - The size of the window for digitial filtering + The size of the window for digital filtering Returns ------- @@ -541,9 +550,12 @@ def detect_oscillatory_events( The interval set of detected events with metadata containing the power, amplitude, and peak_time """ - lfp = lfp.restrict(epoch) - frequency = lfp.rate - signal = apply_bandpass_filter(lfp, freq_band, frequency) + data = data.restrict(epoch) + + if fs is None: + fs = data.rate + + signal = apply_bandpass_filter(data, freq_band, fs) squared_signal = np.square(signal.values) window = np.ones(wsize) / wsize @@ -569,7 +581,7 @@ def detect_oscillatory_events( peak_times = [] for s, e in osc_ep.values: - seg = signal.restrict(nap.IntervalSet(s, e)) + seg = signal.get(s, e) if len(seg) == 0: powers.append(np.nan) amplitudes.append(np.nan) diff --git a/tests/test_filtering.py b/tests/test_filtering.py index d54c44825..499bda874 100644 --- a/tests/test_filtering.py +++ b/tests/test_filtering.py @@ -526,13 +526,14 @@ def test_get_filter_frequency_response_error(): @pytest.mark.parametrize( - "freq_band, thresh_band, start, end", + "freq_band, thresh_band, num_events, start, end", [ - ((10, 30), (1, 10), 0, 2), - ((40, 60), (1, 10), 3, 5), + ((10, 30), (1, 10), 1, 0, 2), + ((40, 60), (1, 10), 1, 3, 5), + ((100, 150), (1, 10), 0, None, None), ], ) -def test_detect_oscillatory_events(freq_band, thresh_band, start, end): +def 
test_detect_oscillatory_events(freq_band, thresh_band, num_events, start, end): fs = 1000 duration = 5 min_dur = 0.1 @@ -558,18 +559,19 @@ def test_detect_oscillatory_events(freq_band, thresh_band, start, end): ts, epoch, freq_band, thresh_band, (min_dur, max_dur), min_inter ) - assert len(osc_ep) == 1 # Only one event in given freq_band + assert len(osc_ep) == num_events # Only one event in given freq_band - # Start and end should be close to actuals +/- a small amount - detected_start = osc_ep.start[0] - detected_end = osc_ep.end[0] - assert np.isclose(start, detected_start, atol=0.05) - assert np.isclose(end, detected_end, atol=0.05) + if num_events > 0: + # Start and end should be close to actuals +/- a small amount + detected_start = osc_ep.start[0] + detected_end = osc_ep.end[0] + assert np.isclose(start, detected_start, atol=0.05) + assert np.isclose(end, detected_end, atol=0.05) - # Check we store power, amplitude, and peak_time - for key in ["power", "amplitude", "peak_time"]: - assert key in osc_ep._metadata + # Check we store power, amplitude, and peak_time + for key in ["power", "amplitude", "peak_time"]: + assert key in osc_ep._metadata - # Check peak_time is within the interval - peak_time = osc_ep._metadata["peak_time"][0] - assert start <= peak_time <= end + # Check peak_time is within the interval + peak_time = osc_ep._metadata["peak_time"][0] + assert start <= peak_time <= end From d1d144c144a5fda564242c308d9963f7f565676c Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Thu, 11 Sep 2025 16:44:25 -0400 Subject: [PATCH 131/244] Update --- doc/examples.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/doc/examples.md b/doc/examples.md index d9202c226..731423656 100644 --- a/doc/examples.md +++ b/doc/examples.md @@ -29,14 +29,6 @@ Analyzing calcium imaging data ::: -:::{card} -```{toctree} -:maxdepth: 3 -Example perievent -``` -::: - - :::{card} ```{toctree} :maxdepth: 3 From 693420b566389a26c2f58e2facfc1b64d992731c Mon Sep 17 00:00:00 
2001 From: Guillaume Viejo Date: Mon, 15 Sep 2025 10:19:20 -0400 Subject: [PATCH 132/244] Fixing perievent --- pynapple/process/_process_functions.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/pynapple/process/_process_functions.py b/pynapple/process/_process_functions.py index 94e07e75f..888113f43 100644 --- a/pynapple/process/_process_functions.py +++ b/pynapple/process/_process_functions.py @@ -247,17 +247,16 @@ def _perievent_continuous( w_sizes = slice_idx[:, 1] - slice_idx[:, 0] # Different sizes all_w_sizes = np.unique(w_sizes) - all_w_start = np.unique(w_starts) for w_size in all_w_sizes: - for w_start in all_w_start: - col_idx = w_sizes == w_size - new_idx = np.zeros((w_size, np.sum(col_idx)), dtype=int) - for i, tmp in enumerate(slice_idx[col_idx]): - new_idx[:, i] = np.arange(tmp[0], tmp[1]) - - new_data_array[w_start : w_start + w_size, col_idx] = data_array[ - new_idx - ] + col_idx = w_sizes == w_size + + for st in np.unique(w_starts[col_idx]): + col_idx2 = col_idx & (w_starts == st) + new_idx = np.zeros((w_size, np.sum(col_idx2)), dtype=int) + for i, slc in enumerate(slice_idx[col_idx2]): + new_idx[:, i] = np.arange(slc[0], slc[1]) + + new_data_array[st : st + w_size, col_idx2] = data_array[new_idx] return new_data_array From 3acfa162d1649516ef2d4b78de97198945f84713 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 15 Sep 2025 11:16:23 -0400 Subject: [PATCH 133/244] Update pynapple/process/_process_functions.py Co-authored-by: Edoardo Balzani --- pynapple/process/_process_functions.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pynapple/process/_process_functions.py b/pynapple/process/_process_functions.py index 888113f43..f9960c2c1 100644 --- a/pynapple/process/_process_functions.py +++ b/pynapple/process/_process_functions.py @@ -249,13 +249,12 @@ def _perievent_continuous( all_w_sizes = np.unique(w_sizes) for w_size in all_w_sizes: - col_idx = w_sizes == w_size 
- - for st in np.unique(w_starts[col_idx]): - col_idx2 = col_idx & (w_starts == st) - new_idx = np.zeros((w_size, np.sum(col_idx2)), dtype=int) - for i, slc in enumerate(slice_idx[col_idx2]): - new_idx[:, i] = np.arange(slc[0], slc[1]) + unique_pairs = np.unique(np.column_stack([w_sizes, w_starts]), axis=0) + for w_size, w_start in unique_pairs: + col_idx = (w_sizes == w_size) & (w_starts == w_start) + new_idx = np.zeros((w_size, np.sum(col_idx)), dtype=int) + for i, slc in enumerate(slice_idx[col_idx]): + new_idx[:, i] = np.arange(slc[0], slc[1]) new_data_array[st : st + w_size, col_idx2] = data_array[new_idx] From 951dcd75d05b3d78d74e5cca65ce3940473d8d93 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 15 Sep 2025 11:16:36 -0400 Subject: [PATCH 134/244] Update pynapple/process/_process_functions.py Co-authored-by: Edoardo Balzani --- pynapple/process/_process_functions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pynapple/process/_process_functions.py b/pynapple/process/_process_functions.py index f9960c2c1..060aef5bf 100644 --- a/pynapple/process/_process_functions.py +++ b/pynapple/process/_process_functions.py @@ -248,7 +248,6 @@ def _perievent_continuous( all_w_sizes = np.unique(w_sizes) - for w_size in all_w_sizes: unique_pairs = np.unique(np.column_stack([w_sizes, w_starts]), axis=0) for w_size, w_start in unique_pairs: col_idx = (w_sizes == w_size) & (w_starts == w_start) From b163c48317b5a54d5900c6418a93feca43c7a0a0 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 15 Sep 2025 11:21:06 -0400 Subject: [PATCH 135/244] linting --- pynapple/process/_process_functions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pynapple/process/_process_functions.py b/pynapple/process/_process_functions.py index 060aef5bf..415aa6f75 100644 --- a/pynapple/process/_process_functions.py +++ b/pynapple/process/_process_functions.py @@ -246,8 +246,6 @@ def _perievent_continuous( w_sizes = slice_idx[:, 1] - slice_idx[:, 0] # Different sizes - 
all_w_sizes = np.unique(w_sizes) - unique_pairs = np.unique(np.column_stack([w_sizes, w_starts]), axis=0) for w_size, w_start in unique_pairs: col_idx = (w_sizes == w_size) & (w_starts == w_start) From df16878e2144c1f1b756d00ab8a5591a3e7a6e31 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 15 Sep 2025 11:29:53 -0400 Subject: [PATCH 136/244] last fix --- pynapple/process/_process_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/_process_functions.py b/pynapple/process/_process_functions.py index 415aa6f75..86d491e81 100644 --- a/pynapple/process/_process_functions.py +++ b/pynapple/process/_process_functions.py @@ -253,6 +253,6 @@ def _perievent_continuous( for i, slc in enumerate(slice_idx[col_idx]): new_idx[:, i] = np.arange(slc[0], slc[1]) - new_data_array[st : st + w_size, col_idx2] = data_array[new_idx] + new_data_array[w_start : w_start + w_size, col_idx] = data_array[new_idx] return new_data_array From e88d4d4f43a21d050534bed4f837b2be59b61fa5 Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Mon, 22 Sep 2025 15:17:21 -0400 Subject: [PATCH 137/244] fix tsdframe repr --- pynapple/core/time_series.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 357b0644a..5f87eedab 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -1535,7 +1535,7 @@ def __repr__(self): np.hstack( ( self.index[0:n_rows, None], - np.round(self.values[0:n_rows, 0:max_cols], 5), + self.values[0:n_rows, 0:max_cols], ends, ), dtype=object, @@ -1543,7 +1543,7 @@ def __repr__(self): np.array( [ ["..."] - + ["..."] * np.minimum(max_cols, self.shape[1]) + + [None] * np.minimum(max_cols, self.shape[1]) + end ], dtype=object, @@ -1551,7 +1551,7 @@ def __repr__(self): np.hstack( ( self.index[-n_rows:, None], - np.round(self.values[-n_rows:, 0:max_cols], 5), + self.values[-n_rows:, 0:max_cols], ends, ), dtype=object, @@ -1563,7 
+1563,7 @@ def __repr__(self): table = np.hstack( ( self.index[:, None], - np.round(self.values[:, 0:max_cols], 5), + self.values[:, 0:max_cols], ends, ), dtype=object, From 5a445a901a569addff67db323df9e926547263ab Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Mon, 22 Sep 2025 16:09:53 -0400 Subject: [PATCH 138/244] fix indexing of TsdFrames with Tsd/TsdFrame objects --- pynapple/core/time_series.py | 11 +++++++---- tests/test_time_series.py | 21 +++++++++++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 5f87eedab..72732c097 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -1705,15 +1705,18 @@ def __setitem__(self, key, value): def __getitem__(self, key, *args, **kwargs): if isinstance(key, tuple): key = tuple(k.values if hasattr(k, "values") else k for k in key) - if isinstance(key, Tsd): + if isinstance(key, (Tsd, TsdFrame)): try: assert np.issubdtype(key.dtype, np.bool_) except AssertionError: raise ValueError( - "When indexing with a Tsd, it must contain boolean values" + "When indexing with a Tsd or TsdFrame, it must contain boolean values" ) - key = key.d - elif isinstance(key, str): + if isinstance(key, TsdFrame): + return self.values.__getitem__(key.d) + else: + key = key.d + if isinstance(key, str): if key in self.columns: with warnings.catch_warnings(): # ignore deprecated warning for loc diff --git a/tests/test_time_series.py b/tests/test_time_series.py index 4472b634f..859d44f34 100755 --- a/tests/test_time_series.py +++ b/tests/test_time_series.py @@ -1604,6 +1604,27 @@ def test_vert_and_horz_slicing(self, tsdframe, row, col, expected): output, tsdframe.values[row, col] ) + def test_tsd_indexing(self, tsdframe): + tsd_index = tsdframe[:, 0] > 0 + output = tsdframe[tsd_index] + np.testing.assert_array_almost_equal( + output.values, tsdframe.values[tsd_index.values] + ) + assert isinstance(output, nap.TsdFrame) + + with 
pytest.raises(ValueError, match="must contain boolean values"): + tsdframe[tsd_index + 1] + + tsdframe_index = tsdframe > 0 + output = tsdframe[tsdframe_index] + np.testing.assert_array_almost_equal( + output, tsdframe.values[tsdframe_index.values] + ) + assert isinstance(output, np.ndarray) + + with pytest.raises(ValueError, match="must contain boolean values"): + tsdframe[tsdframe_index + 1] + @pytest.mark.parametrize("index", [0, [0, 2]]) def test_str_indexing(self, tsdframe, index): columns = tsdframe.columns From 11d96ee6fd742c6f7faf4459b790f4e497b2b969 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 13:40:25 +0000 Subject: [PATCH 139/244] generalised mutual information + tests --- pynapple/process/__init__.py | 1 + pynapple/process/tuning_curves.py | 150 +++++++++++++++++++++++++- tests/test_tuning_curves.py | 172 ++++++++++++++++++++++++------ 3 files changed, 286 insertions(+), 37 deletions(-) diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index cba778160..37cc03254 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -29,6 +29,7 @@ compute_power_spectral_density, ) from .tuning_curves import ( + compute_mutual_information, compute_1d_mutual_info, compute_1d_tuning_curves, compute_1d_tuning_curves_continuous, diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index e125cb5ce..9dcc8610f 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -297,7 +297,9 @@ def compute_tuning_curves( keys = ( data.keys() if isinstance(data, nap.TsGroup) - else data.columns if isinstance(data, nap.TsdFrame) else [0] + else data.columns + if isinstance(data, nap.TsdFrame) + else [0] ) tcs = np.zeros([len(keys), *occupancy.shape]) if isinstance(data, (nap.TsGroup, nap.Ts)): @@ -500,7 +502,7 @@ def compute_discrete_tuning_curves(group, dict_ep): @_validate_tuning_inputs -def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, 
bitssec=False): +def compute_1d_mutual_info_old(tc, feature, ep=None, minmax=None, bitssec=False): """ Mutual information of a tuning curve computed from a 1-d feature. @@ -569,7 +571,7 @@ def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): @_validate_tuning_inputs -def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): +def compute_2d_mutual_info_old(dict_tc, features, ep=None, minmax=None, bitssec=False): """ Mutual information of a tuning curve computed from 2-d features. @@ -649,3 +651,145 @@ def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=Fals SI = SI / fr[:, 0, 0] SI = pd.DataFrame(index=idx, columns=["SI"], data=SI) return SI + + +def compute_mutual_information(tuning_curves): + """ + Mutual information of a tuning curve. + + See: + + Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993). + An information-theoretic approach to deciphering the hippocampal code. + In Advances in neural information processing systems (pp. 1030-1037). + + Parameters + ---------- + tuning_curves : xarray.DataArray + As outputted by `compute_tuning_curves`. + + Returns + ------- + xarray.DataArray + An array containing the spatial information per unit, in both bits/sec and bits/spike. + """ + if not isinstance(tuning_curves, xr.DataArray): + raise TypeError( + "tuning_curves should be an xr.DataArray as computed by compute_tuning_curves." 
+ ) + + fx = tuning_curves.values + axes = tuple(range(1, fx.ndim)) + fr_keepdims = np.nansum( + fx * tuning_curves.attrs["occupancy"], axis=axes, keepdims=True + ) + fr_scalar = np.squeeze(fr_keepdims, axis=axes) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fxfr = fx / fr_keepdims + logfx = np.log2(fxfr) + logfx[~np.isfinite(logfx)] = 0.0 + MI_bits_per_sec = np.nansum( + tuning_curves.attrs["occupancy"] * fx * logfx, axis=axes + ) + with np.errstate(divide="ignore", invalid="ignore"): + MI_bits_per_spike = MI_bits_per_sec / fr_scalar + + return xr.DataArray( + data=np.stack([MI_bits_per_sec, MI_bits_per_spike], axis=1), + coords={ + "unit": tuning_curves.coords["unit"], + "bits": ["bits/sec", "bits/spike"], + }, + ) + + +@_validate_tuning_inputs +def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): + warnings.warn( + "compute_2d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if type(dict_tc) is dict: + tcs = xr.DataArray( + np.array([dict_tc[i] for i in dict_tc.keys()]), + coords={"unit": list(dict_tc.keys())}, + dims=["unit", "0", "1"], + ) + else: + tcs = xr.DataArray( + dict_tc, + coords={"unit": np.arange(len(dict_tc))}, + dims=["unit", "0", "1"], + ) + + nb_bins = (tcs.shape[1] + 1, tcs.shape[2] + 1) + bins = [] + for i in range(2): + if minmax is None: + bins.append( + np.linspace( + np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] + ) + ) + else: + bins.append( + np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) + ) + + if isinstance(ep, nap.IntervalSet): + features = features.restrict(ep) + + occupancy, _, _ = np.histogram2d( + features[:, 0].values.flatten(), + features[:, 1].values.flatten(), + [bins[0], bins[1]], + ) + occupancy = occupancy / occupancy.sum() + + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + if bitssec: + return 
pd.DataFrame(MI.sel(bits="bits/sec").values, columns=["SI"]) + else: + return pd.DataFrame(MI.sel(bits="bits/spike").values, columns=["SI"]) + + +@_validate_tuning_inputs +def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): + warnings.warn( + "compute_1d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(tc, pd.DataFrame): + tcs = xr.DataArray( + tc.values.T, coords={"unit": tc.columns.values, "0": tc.index} + ) + else: + tcs = xr.DataArray( + tc.T, coords={"unit": np.arange(tc.shape[1])}, dims=["unit", "0"] + ) + + nb_bins = tc.shape[0] + 1 + if minmax is None: + bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) + else: + bins = np.linspace(minmax[0], minmax[1], nb_bins) + + if isinstance(ep, nap.IntervalSet): + occupancy, _ = np.histogram(feature.restrict(ep).values, bins) + else: + occupancy, _ = np.histogram(feature.values, bins) + occupancy = occupancy / occupancy.sum() + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + if bitssec: + return pd.DataFrame(MI.sel(bits="bits/sec").values, columns=["SI"]) + else: + return pd.DataFrame(MI.sel(bits="bits/spike").values, columns=["SI"]) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 9b4d223c1..ec1cf9499 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -301,7 +301,7 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): @pytest.mark.parametrize( - "data, features, kwargs, expected", + "data, features, kwargs, expectation", [ # single rate unit, single feature ( @@ -611,12 +611,12 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): ), ], ) -def test_compute_tuning_curves(data, features, kwargs, expected): +def test_compute_tuning_curves(data, features, kwargs, expectation): tcs = nap.compute_tuning_curves(data, 
features, **kwargs) - if isinstance(expected, pd.DataFrame): - pd.testing.assert_frame_equal(tcs, expected) + if isinstance(expectation, pd.DataFrame): + pd.testing.assert_frame_equal(tcs, expectation) else: - xr.testing.assert_allclose(tcs, expected) + xr.testing.assert_allclose(tcs, expectation) # ------------------------------------------------------------------------------------ @@ -656,7 +656,7 @@ def get_tsdframe(): @pytest.mark.parametrize( - "group, dict_ep, expected_exception", + "group, dict_ep, expectation", [ ( "a", @@ -682,8 +682,8 @@ def get_tsdframe(): ), ], ) -def test_compute_discrete_tuning_curves_errors(group, dict_ep, expected_exception): - with expected_exception: +def test_compute_discrete_tuning_curves_errors(group, dict_ep, expectation): + with expectation: nap.compute_discrete_tuning_curves(group, dict_ep) @@ -712,8 +712,114 @@ def test_compute_discrete_tuning_curves(group, dict_ep): # ------------------------------------------------------------------------------------ +def get_testing_set(n_units=1, n_features=1, pattern="uniform"): + dims = ["unit"] + [f"dim_{i}" for i in range(n_features)] + coords = {"unit": np.arange(n_units)} + shape = (n_units,) + (2,) * n_features # 2 bins per feature, for simplicity + for i in range(n_features): + coords[f"dim_{i}"] = np.arange(2) + + # Build tuning curves + data = np.zeros(shape) + + if pattern == "uniform": + data[:] = 1.0 + expected_mi_per_sec = 0.0 + expected_mi_per_spike = 0.0 + + elif pattern == "onehot": + # Each unit fires in a unique location only + for u in range(n_units): + index = [u] + [0] * n_features + data[tuple(index)] = 1.0 + + n_bins = np.prod(shape[1:]) + expected_mi_per_spike = np.log2(n_bins) + mean_rate = 1.0 / n_bins + expected_mi_per_sec = mean_rate * expected_mi_per_spike + + else: + raise ValueError("Unknown firing_pattern. 
Use 'uniform' or 'onehot'.") + + tuning_curves = xr.DataArray( + data, + coords=coords, + dims=dims, + attrs={"occupancy": np.ones(shape[1:]) / np.prod(shape[1:])}, + ) + + MI = xr.DataArray( + data=np.stack( + [ + np.full(n_units, expected_mi_per_sec), + np.full(n_units, expected_mi_per_spike), + ], + axis=1, + ), + coords={ + "unit": coords["unit"], + "bits": ["bits/sec", "bits/spike"], + }, + dims=["unit", "bits"], + ) + + return tuning_curves, MI + + +@pytest.mark.parametrize( + "tuning_curves, expectation", + [ + # tuning_curves + ( + [], + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + ), + ), + ( + 1, + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + ), + ), + ( + get_testing_set()[0].to_pandas().T, + pytest.raises( + TypeError, + match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + ), + ), + (get_testing_set(1, 1)[0], does_not_raise()), + (get_testing_set(1, 2)[0], does_not_raise()), + (get_testing_set(1, 3)[0], does_not_raise()), + (get_testing_set(2, 1)[0], does_not_raise()), + (get_testing_set(2, 2)[0], does_not_raise()), + (get_testing_set(2, 3)[0], does_not_raise()), + ], +) +def test_compute_mutual_information_errors(tuning_curves, expectation): + with expectation: + nap.compute_mutual_information(tuning_curves) + + +@pytest.mark.parametrize( + "n_units, n_features", + [(1, 1), (1, 2), (1, 3)], +) @pytest.mark.parametrize( - "tc, feature, ep, minmax, bitssec, expected_exception", + "pattern", + ["uniform", "onehot"], +) +def test_compute_mutual_information(n_units, n_features, pattern): + tuning_curves, expectation = get_testing_set(n_units, n_features, pattern) + actual = nap.compute_mutual_information(tuning_curves) + xr.testing.assert_allclose(actual, expectation) + + +@pytest.mark.parametrize( + "tc, feature, ep, minmax, bitssec, expectation", [ ( "a", @@ -757,15 +863,13 
@@ def test_compute_discrete_tuning_curves(group, dict_ep): ), ], ) -def test_compute_1d_mutual_info_errors( - tc, feature, ep, minmax, bitssec, expected_exception -): - with pytest.raises(TypeError, match=expected_exception): +def test_compute_1d_mutual_info_errors(tc, feature, ep, minmax, bitssec, expectation): + with pytest.raises(TypeError, match=expectation): nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) @pytest.mark.parametrize( - "dict_tc, features, ep, minmax, bitssec, expected_exception", + "dict_tc, features, ep, minmax, bitssec, expectation", [ ( "a", @@ -810,15 +914,15 @@ def test_compute_1d_mutual_info_errors( ], ) def test_compute_2d_mutual_info_errors( - dict_tc, features, ep, minmax, bitssec, expected_exception + dict_tc, features, ep, minmax, bitssec, expectation ): - with pytest.raises(TypeError, match=expected_exception): + with pytest.raises(TypeError, match=expectation): nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "args, kwargs, expectation", [ ( ( @@ -862,7 +966,7 @@ def test_compute_2d_mutual_info_errors( ), ], ) -def test_compute_1d_mutual_info(args, kwargs, expected): +def test_compute_1d_mutual_info(args, kwargs, expectation): tc = args[0] feature = args[1] si = nap.compute_1d_mutual_info(tc, feature, **kwargs) @@ -870,12 +974,12 @@ def test_compute_1d_mutual_info(args, kwargs, expected): assert list(si.columns) == ["SI"] if isinstance(tc, pd.DataFrame): assert list(si.index.values) == list(tc.columns) - np.testing.assert_approx_equal(si.values, expected) + np.testing.assert_approx_equal(si.values, expectation) @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "args, kwargs, expectation", [ ( ( @@ -934,7 +1038,7 @@ def test_compute_1d_mutual_info(args, kwargs, expected): ), ], ) -def test_compute_2d_mutual_info(args, kwargs, expected): +def 
test_compute_2d_mutual_info(args, kwargs, expectation): dict_tc = args[0] features = args[1] si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) @@ -942,7 +1046,7 @@ def test_compute_2d_mutual_info(args, kwargs, expected): assert list(si.columns) == ["SI"] if isinstance(dict_tc, dict): assert list(si.index.values) == list(dict_tc.keys()) - np.testing.assert_approx_equal(si.values, expected) + np.testing.assert_approx_equal(si.values, expectation) # ------------------------------------------------------------------------------------ @@ -952,7 +1056,7 @@ def test_compute_2d_mutual_info(args, kwargs, expected): @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "args, kwargs, expectation", [ ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), ( @@ -972,7 +1076,7 @@ def test_compute_2d_mutual_info(args, kwargs, expected): ), ], ) -def test_compute_1d_tuning_curves(args, kwargs, expected): +def test_compute_1d_tuning_curves(args, kwargs, expectation): tc = nap.compute_1d_tuning_curves(*args, **kwargs) # Columns assert list(tc.columns) == list(args[0].keys()) @@ -986,12 +1090,12 @@ def test_compute_1d_tuning_curves(args, kwargs, expected): np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) # Array - np.testing.assert_almost_equal(tc.values, expected) + np.testing.assert_almost_equal(tc.values, expectation) @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "args, kwargs, expectation", [ ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), @@ -1012,7 +1116,7 @@ def test_compute_1d_tuning_curves(args, kwargs, expected): ), ], ) -def test_compute_2d_tuning_curves(args, kwargs, expected): +def test_compute_2d_tuning_curves(args, kwargs, expectation): tc, xy = nap.compute_2d_tuning_curves(*args, **kwargs) assert isinstance(tc, dict) @@ -1038,12 
+1142,12 @@ def test_compute_2d_tuning_curves(args, kwargs, expected): # Values for i in tc.keys(): assert tc[i].shape == nb_bins - np.testing.assert_almost_equal(tc[i], expected) + np.testing.assert_almost_equal(tc[i], expectation) @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "args, kwargs, expected", + "args, kwargs, expectation", [ ( (get_tsdframe(), get_feature(), 10), @@ -1077,7 +1181,7 @@ def test_compute_2d_tuning_curves(args, kwargs, expected): ), ], ) -def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): +def test_compute_1d_tuning_curves_continuous(args, kwargs, expectation): tsdframe, feature, nb_bins = args tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) # Columns @@ -1091,12 +1195,12 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) # Array - np.testing.assert_almost_equal(tc.values, expected) + np.testing.assert_almost_equal(tc.values, expectation) @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( - "tsdframe, nb_bins, kwargs, expected", + "tsdframe, nb_bins, kwargs, expectation", [ ( nap.TsdFrame( @@ -1161,7 +1265,7 @@ def test_compute_1d_tuning_curves_continuous(args, kwargs, expected): ), ], ) -def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected): +def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expectation): features = nap.TsdFrame( t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T ) @@ -1195,4 +1299,4 @@ def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expected # Values for i in tc.keys(): assert tc[i].shape == nb_bins - np.testing.assert_almost_equal(tc[i], expected[i]) + np.testing.assert_almost_equal(tc[i], expectation[i]) From 908e5dd8d0b5907513af60220a291332e9667665 Mon Sep 17 00:00:00 2001 
From: wulfdewolf Date: Mon, 6 Oct 2025 13:53:17 +0000 Subject: [PATCH 140/244] better docstring title --- pynapple/process/tuning_curves.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 9dcc8610f..bfbb09edb 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -56,7 +56,7 @@ def wrapper(*args, **kwargs): if not isinstance(kwargs["dict_ep"], dict): raise TypeError("dict_ep should be a dictionary of IntervalSet") if not all( - [isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values()] + isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values() ): raise TypeError("dict_ep argument should contain only IntervalSet.") if "tc" in kwargs: @@ -655,7 +655,7 @@ def compute_2d_mutual_info_old(dict_tc, features, ep=None, minmax=None, bitssec= def compute_mutual_information(tuning_curves): """ - Mutual information of a tuning curve. + Mutual information of an N-dimensional tuning curve. 
See: From 4238893b6a1a0997279aba6f92dffb930b98132b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 13:58:21 +0000 Subject: [PATCH 141/244] formatting --- pynapple/process/tuning_curves.py | 4 +--- tests/test_tuning_curves.py | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index bfbb09edb..f6ad65ff9 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -297,9 +297,7 @@ def compute_tuning_curves( keys = ( data.keys() if isinstance(data, nap.TsGroup) - else data.columns - if isinstance(data, nap.TsdFrame) - else [0] + else data.columns if isinstance(data, nap.TsdFrame) else [0] ) tcs = np.zeros([len(keys), *occupancy.shape]) if isinstance(data, (nap.TsGroup, nap.Ts)): diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index ec1cf9499..42bef8d2f 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -769,7 +769,6 @@ def get_testing_set(n_units=1, n_features=1, pattern="uniform"): @pytest.mark.parametrize( "tuning_curves, expectation", [ - # tuning_curves ( [], pytest.raises( From f9e455a02cf81dfa295732a0b7ee6c806298a9e3 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 14:01:03 +0000 Subject: [PATCH 142/244] sorting imports --- pynapple/process/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 37cc03254..58ae66af4 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -29,7 +29,6 @@ compute_power_spectral_density, ) from .tuning_curves import ( - compute_mutual_information, compute_1d_mutual_info, compute_1d_tuning_curves, compute_1d_tuning_curves_continuous, @@ -37,6 +36,7 @@ compute_2d_tuning_curves, compute_2d_tuning_curves_continuous, compute_discrete_tuning_curves, + compute_mutual_information, compute_tuning_curves, ) from .warping import build_tensor, 
warp_tensor From 235e9904a44dee9adaa5676541fb6a4d03e3d412 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 16:29:54 +0000 Subject: [PATCH 143/244] moving old tuning curves + tests to new _old files + docstring --- pynapple/process/__init__.py | 5 +- pynapple/process/tuning_curves.py | 494 +--------------------- pynapple/process/tuning_curves_old.py | 323 ++++++++++++++ tests/test_tuning_curves.py | 11 +- tests/test_tuning_curves_old.py | 584 ++++++++++++++++++++++++++ 5 files changed, 926 insertions(+), 491 deletions(-) create mode 100644 pynapple/process/tuning_curves_old.py create mode 100644 tests/test_tuning_curves_old.py diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 58ae66af4..7b1ce4606 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -28,7 +28,8 @@ compute_mean_power_spectral_density, compute_power_spectral_density, ) -from .tuning_curves import ( +from .tuning_curves import compute_mutual_information, compute_tuning_curves +from .tuning_curves_old import ( compute_1d_mutual_info, compute_1d_tuning_curves, compute_1d_tuning_curves_continuous, @@ -36,8 +37,6 @@ compute_2d_tuning_curves, compute_2d_tuning_curves_continuous, compute_discrete_tuning_curves, - compute_mutual_information, - compute_tuning_curves, ) from .warping import build_tensor, warp_tensor from .wavelets import compute_wavelet_transform, generate_morlet_filterbank diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index f6ad65ff9..ddef2d0e2 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -2,10 +2,7 @@ Functions to compute n-dimensional tuning curves. """ -import inspect import warnings -from collections.abc import Iterable -from functools import wraps import numpy as np import pandas as pd @@ -14,73 +11,6 @@ from .. 
import core as nap -def _validate_tuning_inputs(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Validate each positional argument - sig = inspect.signature(func) - kwargs = sig.bind_partial(*args, **kwargs).arguments - - if "feature" in kwargs: - if not isinstance(kwargs["feature"], (nap.Tsd, nap.TsdFrame)): - raise TypeError( - "feature should be a Tsd (or TsdFrame with 1 column only)" - ) - if ( - isinstance(kwargs["feature"], nap.TsdFrame) - and not kwargs["feature"].shape[1] == 1 - ): - raise ValueError( - "feature should be a Tsd (or TsdFrame with 1 column only)" - ) - if "features" in kwargs: - if not isinstance(kwargs["features"], nap.TsdFrame): - raise TypeError("features should be a TsdFrame with 2 columns") - if not kwargs["features"].shape[1] == 2: - raise ValueError("features should have 2 columns only.") - if "nb_bins" in kwargs: - if not isinstance(kwargs["nb_bins"], (int, tuple)): - raise TypeError( - "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." 
- ) - if "group" in kwargs: - if not isinstance(kwargs["group"], nap.TsGroup): - raise TypeError("group should be a TsGroup.") - if "ep" in kwargs: - if not isinstance(kwargs["ep"], nap.IntervalSet): - raise TypeError("ep should be an IntervalSet") - if "minmax" in kwargs: - if not isinstance(kwargs["minmax"], Iterable): - raise TypeError("minmax should be a tuple/list of 2 numbers") - if "dict_ep" in kwargs: - if not isinstance(kwargs["dict_ep"], dict): - raise TypeError("dict_ep should be a dictionary of IntervalSet") - if not all( - isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values() - ): - raise TypeError("dict_ep argument should contain only IntervalSet.") - if "tc" in kwargs: - if not isinstance(kwargs["tc"], (pd.DataFrame, np.ndarray)): - raise TypeError( - "Argument tc should be of type pandas.DataFrame or numpy.ndarray" - ) - if "dict_tc" in kwargs: - if not isinstance(kwargs["dict_tc"], (dict, np.ndarray)): - raise TypeError( - "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray." - ) - if "bitssec" in kwargs: - if not isinstance(kwargs["bitssec"], bool): - raise TypeError("Argument bitssec should be of type bool") - if "tsdframe" in kwargs: - if not isinstance(kwargs["tsdframe"], (nap.Tsd, nap.TsdFrame)): - raise TypeError("Argument tsdframe should be of type Tsd or TsdFrame.") - # Call the original function with validated inputs - return func(**kwargs) - - return wrapper - - def compute_tuning_curves( data, features, @@ -345,321 +275,9 @@ def compute_tuning_curves( return tcs -@_validate_tuning_inputs -def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): - """ - Deprecated, use `compute_tuning_curves` instead. 
- """ - warnings.warn( - "compute_1d_tuning_curves is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - return ( - compute_tuning_curves( - group, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - .to_pandas() - .T - ) - - -@_validate_tuning_inputs -def compute_1d_tuning_curves_continuous( - tsdframe, feature, nb_bins, ep=None, minmax=None -): - """ - Deprecated, use `compute_tuning_curves` instead. - """ - warnings.warn( - "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - return ( - compute_tuning_curves( - tsdframe, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - .to_pandas() - .T - ) - - -@_validate_tuning_inputs -def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): - """ - Deprecated, use `compute_tuning_curves` instead. - """ - warnings.warn( - "compute_2d_tuning_curves is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - xarray = compute_tuning_curves( - group, - features, - nb_bins, - range=( - None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] - ), - epochs=ep, - ) - tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} - bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] - return tcs, bins - - -@_validate_tuning_inputs -def compute_2d_tuning_curves_continuous( - tsdframe, features, nb_bins, ep=None, minmax=None -): - """ - Deprecated, use `compute_tuning_curves` instead. 
- """ - warnings.warn( - "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - xarray = compute_tuning_curves( - tsdframe, - features, - nb_bins, - range=( - None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] - ), - epochs=ep, - ) - tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} - bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] - return tcs, bins - - -@_validate_tuning_inputs -def compute_discrete_tuning_curves(group, dict_ep): - """ - Compute discrete tuning curves of a TsGroup using a dictionary of epochs. - The function returns a pandas DataFrame with each row being a key of the dictionary of epochs - and each column being a neurons. - - This function can typically being used for a set of stimulus being presented for multiple epochs. - An example of the dictionary is: - - >>> dict_ep = { - "stim0": nap.IntervalSet(start=0, end=1), - "stim1":nap.IntervalSet(start=2, end=3) - } - - In this case, the function will return a pandas DataFrame : - - >>> tc - neuron0 neuron1 neuron2 - stim0 0 Hz 1 Hz 2 Hz - stim1 3 Hz 4 Hz 5 Hz - - - Parameters - ---------- - group : nap.TsGroup - The group of Ts/Tsd for which the tuning curves will be computed - dict_ep : dict - Dictionary of IntervalSets - - Returns - ------- - pandas.DataFrame - Table of firing rate for each neuron and each IntervalSet - - Raises - ------ - RuntimeError - If group is not a TsGroup object. 
- """ - idx = np.sort(list(dict_ep.keys())) - tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys()), data=0.0) - - for k in dict_ep.keys(): - for n in group.keys(): - tuning_curves.loc[k, n] = float(len(group[n].restrict(dict_ep[k]))) - - tuning_curves.loc[k] = tuning_curves.loc[k] / dict_ep[k].tot_length("s") - - return tuning_curves - - -@_validate_tuning_inputs -def compute_1d_mutual_info_old(tc, feature, ep=None, minmax=None, bitssec=False): - """ - Mutual information of a tuning curve computed from a 1-d feature. - - See: - - Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993). - An information-theoretic approach to deciphering the hippocampal code. - In Advances in neural information processing systems (pp. 1030-1037). - - Parameters - ---------- - tc : pandas.DataFrame or numpy.ndarray - Tuning curves in columns - feature : Tsd (or TsdFrame with 1 column only) - The 1-dimensional target feature (e.g. head-direction) - ep : IntervalSet, optional - The epoch over which the tuning curves were computed - If None, the epoch is the time support of the feature. - minmax : tuple or list, optional - The min and max boundaries of the tuning curves. - If None, the boundaries are inferred from the target feature - bitssec : bool, optional - By default, the function return bits per spikes. 
- Set to true for bits per seconds - - Returns - ------- - pandas.DataFrame - Spatial Information (default is bits/spikes) - """ - if isinstance(tc, pd.DataFrame): - columns = tc.columns.values - fx = np.atleast_2d(tc.values) - else: - fx = np.atleast_2d(tc) - columns = np.arange(tc.shape[1]) - - nb_bins = tc.shape[0] + 1 - if minmax is None: - bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) - else: - bins = np.linspace(minmax[0], minmax[1], nb_bins) - - if isinstance(ep, nap.IntervalSet): - occupancy, _ = np.histogram(feature.restrict(ep).values, bins) - else: - occupancy, _ = np.histogram(feature.values, bins) - occupancy = occupancy / occupancy.sum() - occupancy = occupancy[:, np.newaxis] - - fr = np.sum(fx * occupancy, 0) - fxfr = fx / fr - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - logfx = np.log2(fxfr) - logfx[np.isinf(logfx)] = 0.0 - SI = np.sum(occupancy * fx * logfx, 0) - - if bitssec: - SI = pd.DataFrame(index=columns, columns=["SI"], data=SI) - return SI - else: - SI = SI / fr - SI = pd.DataFrame(index=columns, columns=["SI"], data=SI) - return SI - - -@_validate_tuning_inputs -def compute_2d_mutual_info_old(dict_tc, features, ep=None, minmax=None, bitssec=False): - """ - Mutual information of a tuning curve computed from 2-d features. - - See: - - Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993). - An information-theoretic approach to deciphering the hippocampal code. - In Advances in neural information processing systems (pp. 1030-1037). - - Parameters - ---------- - dict_tc : dict of numpy.ndarray or numpy.ndarray - If array, first dimension should be the neuron - features : TsdFrame - The 2 columns features that were used to compute the tuning curves - ep : IntervalSet, optional - The epoch over which the tuning curves were computed - If None, the epoch is the time support of the feature. - minmax : tuple or list, optional - The min and max boundaries of the tuning curves. 
- If None, the boundaries are inferred from the target features - bitssec : bool, optional - By default, the function return bits per spikes. - Set to true for bits per seconds - - Returns - ------- - pandas.DataFrame - Spatial Information (default is bits/spikes) - """ - # A bit tedious here - if type(dict_tc) is dict: - fx = np.array([dict_tc[i] for i in dict_tc.keys()]) - idx = list(dict_tc.keys()) - else: - fx = dict_tc - idx = np.arange(len(dict_tc)) - - nb_bins = (fx.shape[1] + 1, fx.shape[2] + 1) - - bins = [] - for i in range(2): - if minmax is None: - bins.append( - np.linspace( - np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] - ) - ) - else: - bins.append( - np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) - ) - - if isinstance(ep, nap.IntervalSet): - features = features.restrict(ep) - - occupancy, _, _ = np.histogram2d( - features[:, 0].values.flatten(), - features[:, 1].values.flatten(), - [bins[0], bins[1]], - ) - occupancy = occupancy / occupancy.sum() - - fr = np.nansum(fx * occupancy, (1, 2)) - fr = fr[:, np.newaxis, np.newaxis] - fxfr = fx / fr - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - logfx = np.log2(fxfr) - logfx[np.isinf(logfx)] = 0.0 - SI = np.nansum(occupancy * fx * logfx, (1, 2)) - - if bitssec: - SI = pd.DataFrame(index=idx, columns=["SI"], data=SI) - return SI - else: - SI = SI / fr[:, 0, 0] - SI = pd.DataFrame(index=idx, columns=["SI"], data=SI) - return SI - - def compute_mutual_information(tuning_curves): """ - Mutual information of an N-dimensional tuning curve. - - See: - - Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993). - An information-theoretic approach to deciphering the hippocampal code. - In Advances in neural information processing systems (pp. 1030-1037). + Mutual information of an n-dimensional tuning curve. 
Parameters ---------- @@ -668,8 +286,15 @@ def compute_mutual_information(tuning_curves): Returns ------- - xarray.DataArray - An array containing the spatial information per unit, in both bits/sec and bits/spike. + pd.DataFrame + A table containing the spatial information per unit, in both bits/sec and bits/spike. + + References + ---------- + .. [1] Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993). + An information-theoretic approach to deciphering the hippocampal code. + In Advances in neural information processing systems (pp. 1030-1037). + """ if not isinstance(tuning_curves, xr.DataArray): raise TypeError( @@ -693,101 +318,8 @@ def compute_mutual_information(tuning_curves): with np.errstate(divide="ignore", invalid="ignore"): MI_bits_per_spike = MI_bits_per_sec / fr_scalar - return xr.DataArray( + return pd.DataFrame( data=np.stack([MI_bits_per_sec, MI_bits_per_spike], axis=1), - coords={ - "unit": tuning_curves.coords["unit"], - "bits": ["bits/sec", "bits/spike"], - }, + index=tuning_curves.coords["unit"], + columns=["bits/sec", "bits/spike"], ) - - -@_validate_tuning_inputs -def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): - warnings.warn( - "compute_2d_mutual_info is deprecated and will be removed in a future version;" - "use compute_mutual_information instead.", - DeprecationWarning, - stacklevel=2, - ) - if type(dict_tc) is dict: - tcs = xr.DataArray( - np.array([dict_tc[i] for i in dict_tc.keys()]), - coords={"unit": list(dict_tc.keys())}, - dims=["unit", "0", "1"], - ) - else: - tcs = xr.DataArray( - dict_tc, - coords={"unit": np.arange(len(dict_tc))}, - dims=["unit", "0", "1"], - ) - - nb_bins = (tcs.shape[1] + 1, tcs.shape[2] + 1) - bins = [] - for i in range(2): - if minmax is None: - bins.append( - np.linspace( - np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] - ) - ) - else: - bins.append( - np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) - ) - - if isinstance(ep, 
nap.IntervalSet): - features = features.restrict(ep) - - occupancy, _, _ = np.histogram2d( - features[:, 0].values.flatten(), - features[:, 1].values.flatten(), - [bins[0], bins[1]], - ) - occupancy = occupancy / occupancy.sum() - - tcs.attrs["occupancy"] = occupancy - MI = compute_mutual_information(tcs) - - if bitssec: - return pd.DataFrame(MI.sel(bits="bits/sec").values, columns=["SI"]) - else: - return pd.DataFrame(MI.sel(bits="bits/spike").values, columns=["SI"]) - - -@_validate_tuning_inputs -def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): - warnings.warn( - "compute_1d_mutual_info is deprecated and will be removed in a future version;" - "use compute_mutual_information instead.", - DeprecationWarning, - stacklevel=2, - ) - if isinstance(tc, pd.DataFrame): - tcs = xr.DataArray( - tc.values.T, coords={"unit": tc.columns.values, "0": tc.index} - ) - else: - tcs = xr.DataArray( - tc.T, coords={"unit": np.arange(tc.shape[1])}, dims=["unit", "0"] - ) - - nb_bins = tc.shape[0] + 1 - if minmax is None: - bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) - else: - bins = np.linspace(minmax[0], minmax[1], nb_bins) - - if isinstance(ep, nap.IntervalSet): - occupancy, _ = np.histogram(feature.restrict(ep).values, bins) - else: - occupancy, _ = np.histogram(feature.values, bins) - occupancy = occupancy / occupancy.sum() - tcs.attrs["occupancy"] = occupancy - MI = compute_mutual_information(tcs) - - if bitssec: - return pd.DataFrame(MI.sel(bits="bits/sec").values, columns=["SI"]) - else: - return pd.DataFrame(MI.sel(bits="bits/spike").values, columns=["SI"]) diff --git a/pynapple/process/tuning_curves_old.py b/pynapple/process/tuning_curves_old.py new file mode 100644 index 000000000..90bbf35a3 --- /dev/null +++ b/pynapple/process/tuning_curves_old.py @@ -0,0 +1,323 @@ +""" +Old functions to compute 1- and 2-dimensional tuning curves. 
+""" + +import inspect +import warnings +from collections.abc import Iterable +from functools import wraps + +import numpy as np +import pandas as pd +import xarray as xr + +from .. import core as nap +from .tuning_curves import compute_mutual_information, compute_tuning_curves + + +def _validate_tuning_inputs(func): + @wraps(func) + def wrapper(*args, **kwargs): + # Validate each positional argument + sig = inspect.signature(func) + kwargs = sig.bind_partial(*args, **kwargs).arguments + + if "feature" in kwargs: + if not isinstance(kwargs["feature"], (nap.Tsd, nap.TsdFrame)): + raise TypeError( + "feature should be a Tsd (or TsdFrame with 1 column only)" + ) + if ( + isinstance(kwargs["feature"], nap.TsdFrame) + and not kwargs["feature"].shape[1] == 1 + ): + raise ValueError( + "feature should be a Tsd (or TsdFrame with 1 column only)" + ) + if "features" in kwargs: + if not isinstance(kwargs["features"], nap.TsdFrame): + raise TypeError("features should be a TsdFrame with 2 columns") + if not kwargs["features"].shape[1] == 2: + raise ValueError("features should have 2 columns only.") + if "nb_bins" in kwargs: + if not isinstance(kwargs["nb_bins"], (int, tuple)): + raise TypeError( + "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." 
+ ) + if "group" in kwargs: + if not isinstance(kwargs["group"], nap.TsGroup): + raise TypeError("group should be a TsGroup.") + if "ep" in kwargs: + if not isinstance(kwargs["ep"], nap.IntervalSet): + raise TypeError("ep should be an IntervalSet") + if "minmax" in kwargs: + if not isinstance(kwargs["minmax"], Iterable): + raise TypeError("minmax should be a tuple/list of 2 numbers") + if "dict_ep" in kwargs: + if not isinstance(kwargs["dict_ep"], dict): + raise TypeError("dict_ep should be a dictionary of IntervalSet") + if not all( + isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values() + ): + raise TypeError("dict_ep argument should contain only IntervalSet.") + if "tc" in kwargs: + if not isinstance(kwargs["tc"], (pd.DataFrame, np.ndarray)): + raise TypeError( + "Argument tc should be of type pandas.DataFrame or numpy.ndarray" + ) + if "dict_tc" in kwargs: + if not isinstance(kwargs["dict_tc"], (dict, np.ndarray)): + raise TypeError( + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray." + ) + if "bitssec" in kwargs: + if not isinstance(kwargs["bitssec"], bool): + raise TypeError("Argument bitssec should be of type bool") + if "tsdframe" in kwargs: + if not isinstance(kwargs["tsdframe"], (nap.Tsd, nap.TsdFrame)): + raise TypeError("Argument tsdframe should be of type Tsd or TsdFrame.") + # Call the original function with validated inputs + return func(**kwargs) + + return wrapper + + +@_validate_tuning_inputs +def compute_discrete_tuning_curves(group, dict_ep): + """ + Compute discrete tuning curves of a TsGroup using a dictionary of epochs. + The function returns a pandas DataFrame with each row being a key of the dictionary of epochs + and each column being a neurons. + + This function can typically being used for a set of stimulus being presented for multiple epochs. 
+ An example of the dictionary is: + + >>> dict_ep = { + "stim0": nap.IntervalSet(start=0, end=1), + "stim1":nap.IntervalSet(start=2, end=3) + } + + In this case, the function will return a pandas DataFrame : + + >>> tc + neuron0 neuron1 neuron2 + stim0 0 Hz 1 Hz 2 Hz + stim1 3 Hz 4 Hz 5 Hz + + + Parameters + ---------- + group : nap.TsGroup + The group of Ts/Tsd for which the tuning curves will be computed + dict_ep : dict + Dictionary of IntervalSets + + Returns + ------- + pandas.DataFrame + Table of firing rate for each neuron and each IntervalSet + + Raises + ------ + RuntimeError + If group is not a TsGroup object. + """ + idx = np.sort(list(dict_ep.keys())) + tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys()), data=0.0) + + for k in dict_ep.keys(): + for n in group.keys(): + tuning_curves.loc[k, n] = float(len(group[n].restrict(dict_ep[k]))) + + tuning_curves.loc[k] = tuning_curves.loc[k] / dict_ep[k].tot_length("s") + + return tuning_curves + + +@_validate_tuning_inputs +def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ + warnings.warn( + "compute_1d_tuning_curves is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + return ( + compute_tuning_curves( + group, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T + ) + + +@_validate_tuning_inputs +def compute_1d_tuning_curves_continuous( + tsdframe, feature, nb_bins, ep=None, minmax=None +): + """ + Deprecated, use `compute_tuning_curves` instead. 
+ """ + warnings.warn( + "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + return ( + compute_tuning_curves( + tsdframe, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T + ) + + +@_validate_tuning_inputs +def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ + warnings.warn( + "compute_2d_tuning_curves is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + group, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins + + +@_validate_tuning_inputs +def compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, ep=None, minmax=None +): + """ + Deprecated, use `compute_tuning_curves` instead. 
+ """ + warnings.warn( + "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + tsdframe, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins + + +@_validate_tuning_inputs +def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): + warnings.warn( + "compute_2d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if type(dict_tc) is dict: + tcs = xr.DataArray( + np.array([dict_tc[i] for i in dict_tc.keys()]), + coords={"unit": list(dict_tc.keys())}, + dims=["unit", "0", "1"], + ) + else: + tcs = xr.DataArray( + dict_tc, + coords={"unit": np.arange(len(dict_tc))}, + dims=["unit", "0", "1"], + ) + + nb_bins = (tcs.shape[1] + 1, tcs.shape[2] + 1) + bins = [] + for i in range(2): + if minmax is None: + bins.append( + np.linspace( + np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] + ) + ) + else: + bins.append( + np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) + ) + + if isinstance(ep, nap.IntervalSet): + features = features.restrict(ep) + + occupancy, _, _ = np.histogram2d( + features[:, 0].values.flatten(), + features[:, 1].values.flatten(), + [bins[0], bins[1]], + ) + occupancy = occupancy / occupancy.sum() + + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + column = "bits/sec" if bitssec else "bits/spike" + return MI[[column]].rename({column: "SI"}, axis=1) + + +@_validate_tuning_inputs +def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): + 
warnings.warn( + "compute_1d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(tc, pd.DataFrame): + tcs = xr.DataArray( + tc.values.T, coords={"unit": tc.columns.values, "0": tc.index} + ) + else: + tcs = xr.DataArray( + tc.T, coords={"unit": np.arange(tc.shape[1])}, dims=["unit", "0"] + ) + + nb_bins = tc.shape[0] + 1 + if minmax is None: + bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) + else: + bins = np.linspace(minmax[0], minmax[1], nb_bins) + + if isinstance(ep, nap.IntervalSet): + occupancy, _ = np.histogram(feature.restrict(ep).values, bins) + else: + occupancy, _ = np.histogram(feature.values, bins) + occupancy = occupancy / occupancy.sum() + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + column = "bits/sec" if bitssec else "bits/spike" + return MI[[column]].rename({column: "SI"}, axis=1) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 42bef8d2f..5d580883a 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -748,7 +748,7 @@ def get_testing_set(n_units=1, n_features=1, pattern="uniform"): attrs={"occupancy": np.ones(shape[1:]) / np.prod(shape[1:])}, ) - MI = xr.DataArray( + MI = pd.DataFrame( data=np.stack( [ np.full(n_units, expected_mi_per_sec), @@ -756,11 +756,8 @@ def get_testing_set(n_units=1, n_features=1, pattern="uniform"): ], axis=1, ), - coords={ - "unit": coords["unit"], - "bits": ["bits/sec", "bits/spike"], - }, - dims=["unit", "bits"], + index=coords["unit"], + columns=["bits/sec", "bits/spike"], ) return tuning_curves, MI @@ -814,7 +811,7 @@ def test_compute_mutual_information_errors(tuning_curves, expectation): def test_compute_mutual_information(n_units, n_features, pattern): tuning_curves, expectation = get_testing_set(n_units, n_features, pattern) actual = nap.compute_mutual_information(tuning_curves) - 
xr.testing.assert_allclose(actual, expectation) + pd.testing.assert_frame_equal(actual, expectation) @pytest.mark.parametrize( diff --git a/tests/test_tuning_curves_old.py b/tests/test_tuning_curves_old.py new file mode 100644 index 000000000..48659c930 --- /dev/null +++ b/tests/test_tuning_curves_old.py @@ -0,0 +1,584 @@ +"""Tests of old 1- and 2-dimensional tuning curves for `pynapple` package.""" + +import numpy as np +import pandas as pd +import pytest + +import pynapple as nap + + +def get_group(): + return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) + + +def get_feature(): + return nap.Tsd( + t=np.arange(0, 100, 0.1), + d=np.arange(0, 100, 0.1) % 1.0, + time_support=nap.IntervalSet(0, 100), + ) + + +def get_features(): + tmp = np.vstack( + (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) + ).T + return nap.TsdFrame( + t=np.arange(0, 200, 0.1), + d=np.vstack((tmp, tmp[::-1])), + time_support=nap.IntervalSet(0, 200), + ) + + +def get_ep(): + return nap.IntervalSet(start=0, end=50) + + +def get_tsdframe(): + return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) + + +# ------------------------------------------------------------------------------------ +# MUTUAL INFORMATION TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.parametrize( + "tc, feature, ep, minmax, bitssec, expectation", + [ + ( + "a", + get_feature(), + get_ep(), + (0, 1), + True, + "Argument tc should be of type pandas.DataFrame or numpy.ndarray", + ), + ( + pd.DataFrame(), + "a", + get_ep(), + (0, 1), + True, + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + pd.DataFrame(), + get_feature(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", 
+ ), + ], +) +def test_compute_1d_mutual_info_errors(tc, feature, ep, minmax, bitssec, expectation): + with pytest.raises(TypeError, match=expectation): + nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) + + +@pytest.mark.parametrize( + "dict_tc, features, ep, minmax, bitssec, expectation", + [ + ( + "a", + get_features(), + get_ep(), + (0, 1), + True, + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", + ), + ( + {0: np.zeros((2, 2))}, + "a", + get_ep(), + (0, 1), + True, + r"features should be a TsdFrame with 2 columns", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_2d_mutual_info_errors( + dict_tc, features, ep, minmax, bitssec, expectation +): + with pytest.raises(TypeError, match=expectation): + nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expectation", + [ + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"bitssec": True}, + np.array([[5.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"ep": nap.IntervalSet(start=0, end=49)}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), 
+ ), + ( + ( + np.array([[0], [10]]), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), + ), + ], +) +def test_compute_1d_mutual_info(args, kwargs, expectation): + tc = args[0] + feature = args[1] + si = nap.compute_1d_mutual_info(tc, feature, **kwargs) + assert isinstance(si, pd.DataFrame) + assert list(si.columns) == ["SI"] + if isinstance(tc, pd.DataFrame): + assert list(si.index.values) == list(tc.columns) + np.testing.assert_approx_equal(si.values, expectation) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expectation", + [ + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + np.array([[[0, 1], [0, 0]]]), + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"bitssec": True}, + np.array([[0.5]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"ep": nap.IntervalSet(start=0, end=7)}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"minmax": (0, 1, 0, 1)}, + np.array([[2.0]]), + ), + ], +) +def test_compute_2d_mutual_info(args, kwargs, expectation): + dict_tc = args[0] + features = args[1] + si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) + assert isinstance(si, pd.DataFrame) + assert list(si.columns) == ["SI"] + if isinstance(dict_tc, dict): + assert list(si.index.values) == list(dict_tc.keys()) + np.testing.assert_approx_equal(si.values, expectation) + + 
+# ------------------------------------------------------------------------------------ +# OLD TUNING CURVE TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expectation", + [ + ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), + ( + (get_group(), get_feature(), 10), + {"ep": get_ep()}, + np.array([10.0] + [0.0] * 9)[:, None], + ), + ( + (get_group(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.array([10.0] + [0.0] * 9)[:, None], + ), + ( + (get_group(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.array([10.0] + [0.0] * 9 + [np.nan] * 10)[:, None], + ), + ], +) +def test_compute_1d_tuning_curves(args, kwargs, expectation): + tc = nap.compute_1d_tuning_curves(*args, **kwargs) + # Columns + assert list(tc.columns) == list(args[0].keys()) + + # Index + assert len(tc) == args[2] + if "minmax" in kwargs: + tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], args[2] + 1) + else: + tmp = np.linspace(np.min(args[1]), np.max(args[1]), args[2] + 1) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) + + # Array + np.testing.assert_almost_equal(tc.values, expectation) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expectation", + [ + ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), + ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), + ( + (get_group(), get_features(), 10), + {"ep": nap.IntervalSet(0, 400)}, + np.ones((10, 10)) * 0.5, + ), + ( + (get_group(), get_features(), 10), + {"minmax": (0, 100, 0, 100)}, + np.ones((10, 10)) * 0.5, + ), + ( + (get_group(), get_features(), 10), + {"minmax": (0, 200, 0, 100)}, + np.vstack((np.ones((5, 10)) * 0.5, np.ones((5, 10)) * np.nan)), + ), + ], +) +def test_compute_2d_tuning_curves(args, kwargs, expectation): + tc, xy = 
nap.compute_2d_tuning_curves(*args, **kwargs) + assert isinstance(tc, dict) + + # Keys + assert list(tc.keys()) == list(args[0].keys()) + + # Index + assert isinstance(xy, list) + assert len(xy) == 2 + nb_bins = args[2] + if isinstance(args[2], int): + nb_bins = (args[2], args[2]) + if "minmax" in kwargs: + tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) + tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) + else: + tmp1 = np.linspace(np.min(args[1][:, 0]), np.max(args[1][:, 0]), nb_bins[0] + 1) + tmp2 = np.linspace(np.min(args[1][:, 1]), np.max(args[1][:, 1]), nb_bins[1] + 1) + + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) + + # Values + for i in tc.keys(): + assert tc[i].shape == nb_bins + np.testing.assert_almost_equal(tc[i], expectation) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expectation", + [ + ( + (get_tsdframe(), get_feature(), 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature()[:, np.newaxis], 10), + {}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe()[:, 0], get_feature(), 10), + {}, + np.vstack((np.ones((1, 1)), np.zeros((9, 1)))), + ), + ( + (get_tsdframe(), get_feature(), 10), + {"ep": get_ep()}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature(), 10), + {"minmax": (0, 0.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), + ), + ( + (get_tsdframe(), get_feature(), 20), + {"minmax": (0, 1.9)}, + np.vstack((np.ones((1, 2)), np.zeros((9, 2)), np.ones((10, 2)) * np.nan)), + ), + ], +) +def test_compute_1d_tuning_curves_continuous(args, kwargs, expectation): + tsdframe, feature, nb_bins = args + tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) + # Columns + if hasattr(tsdframe, "columns"): + assert 
list(tc.columns) == list(tsdframe.columns) + # Index + assert len(tc) == nb_bins + if "minmax" in kwargs: + tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) + else: + tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) + np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) + # Array + np.testing.assert_almost_equal(tc.values, expectation) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "tsdframe, nb_bins, kwargs, expectation", + [ + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + columns=["x", "y"], + ), + 2, + {}, + {"x": np.ones((2, 2)), "y": np.ones((2, 2)) * 2}, + ), + ( + nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100,)) * 2))), + 2, + {}, + {0: np.ones((2, 2)) * 2}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + (1, 2), + {}, + {0: np.array([[1.0, 1.0]]), 1: np.array([[2.0, 2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {"ep": get_ep()}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + 1, + {"minmax": (0, 1, 0, 1)}, + {0: np.array([[1.0]]), 1: np.array([[2.0]])}, + ), + ( + nap.TsdFrame( + t=np.arange(0, 100), + d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), + ), + (1, 3), + {"minmax": (0, 1, 0, 3)}, + {0: np.array([[1.0, 1.0, np.nan]]), 1: np.array([[2.0, 2.0, np.nan]])}, + ), + ], +) +def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expectation): + features = nap.TsdFrame( + t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T + ) + 
tc, xy = nap.compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, **kwargs + ) + + # Keys + if hasattr(tsdframe, "columns"): + assert list(tc.keys()) == list(tsdframe.columns) + + # Index + assert isinstance(xy, list) + assert len(xy) == 2 + if isinstance(nb_bins, int): + nb_bins = (nb_bins, nb_bins) + if "minmax" in kwargs: + tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) + tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) + else: + tmp1 = np.linspace( + np.min(features[:, 0]), np.max(features[:, 0]), nb_bins[0] + 1 + ) + tmp2 = np.linspace( + np.min(features[:, 1]), np.max(features[:, 1]), nb_bins[1] + 1 + ) + + np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) + np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) + + # Values + for i in tc.keys(): + assert tc[i].shape == nb_bins + np.testing.assert_almost_equal(tc[i], expectation[i]) + + +# ------------------------------------------------------------------------------------ +# DISCRETE TUNING CURVE TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.parametrize( + "group, dict_ep, expectation", + [ + ( + "a", + { + 0: nap.IntervalSet(start=0, end=50), + 1: nap.IntervalSet(start=50, end=100), + }, + pytest.raises(TypeError, match="group should be a TsGroup."), + ), + ( + get_group(), + "a", + pytest.raises( + TypeError, match="dict_ep should be a dictionary of IntervalSet" + ), + ), + ( + get_group(), + {0: "a", 1: nap.IntervalSet(start=50, end=100)}, + pytest.raises( + TypeError, match="dict_ep argument should contain only IntervalSet." 
+ ), + ), + ], +) +def test_compute_discrete_tuning_curves_errors(group, dict_ep, expectation): + with expectation: + nap.compute_discrete_tuning_curves(group, dict_ep) + + +@pytest.mark.parametrize("group", [get_group()]) +@pytest.mark.parametrize( + "dict_ep", + [ + {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, + { + "0": nap.IntervalSet(start=0, end=50), + "1": nap.IntervalSet(start=50, end=100), + }, + ], +) +def test_compute_discrete_tuning_curves(group, dict_ep): + tc = nap.compute_discrete_tuning_curves(group, dict_ep) + assert len(tc) == 2 + assert list(tc.columns) == list(group.keys()) + assert list(tc.index.values) == list(dict_ep.keys()) + np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) + np.testing.assert_almost_equal(tc.iloc[1, 0], 1) From 566554b2a7bdd52a6afaea07d88972d83bbc6f9d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 17:07:18 +0000 Subject: [PATCH 144/244] fix docstrings --- pynapple/process/tuning_curves.py | 177 +++++++++++++++--------------- 1 file changed, 89 insertions(+), 88 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index ddef2d0e2..b223eb506 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -67,107 +67,108 @@ def compute_tuning_curves( -------- In the simplest case, we can pass a group of spikes per neuron and a single feature: - >>> import pynapple as nap - >>> import numpy as np; np.random.seed(42) - >>> group = { - ... 1: nap.Ts(np.arange(0, 100, 0.1)), - ... 2: nap.Ts(np.arange(0, 100, 0.2)) - ... } - >>> feature = nap.Tsd(d=np.arange(0, 100, 0.1) % 1, t=np.arange(0, 100, 0.1)) - >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) - >>> tcs - Size: 160B - array([[10., 10., 10., 10., 10., 10., 10., 10., 10., 10.], - [10., 0., 10., 0., 10., 0., 10., 0., 10., 0.]]) - Coordinates: - * unit (unit) int64 16B 1 2 - * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 
0.585 0.675 0.765 0.855 - Attributes: - occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] - bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... + >>> import pynapple as nap + >>> import numpy as np; np.random.seed(42) + >>> group = nap.TsGroup({ + ... 1: nap.Ts(np.arange(0, 100, 0.1)), + ... 2: nap.Ts(np.arange(0, 100, 0.2)) + ... }) + >>> feature = nap.Tsd(d=np.arange(0, 100, 0.1) % 1, t=np.arange(0, 100, 0.1)) + >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) + >>> tcs + Size: 160B + array([[10., 10., 10., 10., 10., 10., 10., 10., 10., 10.], + [10., 0., 10., 0., 10., 0., 10., 0., 10., 0.]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 + Attributes: + occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] + bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... The function can also take multiple features, in which case it computes n-dimensional tuning curves. We can specify the number of bins for each feature: - >>> features = nap.TsdFrame( - ... d=np.stack( - ... [ - ... np.arange(0, 100, 0.1) % 1, - ... np.arange(0, 100, 0.1) % 2 - ... ], - ... axis=1 - ... ), - ... t=np.arange(0, 100, 0.1) - ... ) - >>> tcs = nap.compute_tuning_curves(group, features, bins=[5, 3]) - >>> tcs - Size: 240B - array([[[10., 10., nan], - [10., 10., 10.], - [10., nan, 10.], - [10., 10., 10.], - [nan, 10., 10.]], - ... - [[ 5., 5., nan], - [ 5., 10., 0.], - [ 5., nan, 5.], - [10., 0., 5.], - [nan, 5., 5.]]]) - Coordinates: - * unit (unit) int64 16B 1 2 - * 0 (0) float64 40B 0.09 0.27 0.45 0.63 0.81 - * 1 (1) float64 24B 0.3167 0.95 1.583 - Attributes: - occupancy: [[100. 100. nan]\\n [100. 50. 50.]\\n [100. nan 100.]\\n [ 5... - bin_edges: [array([0. , 0.18, 0.36, 0.54, 0.72, 0.9 ]), array([0. ... + >>> features = nap.TsdFrame( + ... d=np.stack( + ... [ + ... np.arange(0, 100, 0.1) % 1, + ... np.arange(0, 100, 0.1) % 2 + ... ], + ... 
axis=1 + ... ), + ... t=np.arange(0, 100, 0.1) + ... ) + >>> tcs = nap.compute_tuning_curves(group, features, bins=[5, 3]) + >>> tcs + Size: 240B + array([[[10., 10., nan], + [10., 10., 10.], + [10., nan, 10.], + [10., 10., 10.], + [nan, 10., 10.]], + ... + [[ 5., 5., nan], + [ 5., 10., 0.], + [ 5., nan, 5.], + [10., 0., 5.], + [nan, 5., 5.]]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 40B 0.09 0.27 0.45 0.63 0.81 + * 1 (1) float64 24B 0.3167 0.95 1.583 + Attributes: + occupancy: [[100. 100. nan]\\n [100. 50. 50.]\\n [100. nan 100.]\\n [ 5... + bin_edges: [array([0. , 0.18, 0.36, 0.54, 0.72, 0.9 ]), array([0. ... Or even specify the bin edges directly: - >>> tcs = nap.compute_tuning_curves( - ... group, - ... features, - ... bins=[np.linspace(0, 1, 5), np.linspace(0, 2, 3)] - ... ) - >>> tcs - Size: 128B - array([[[10. , 10. ], - [10. , 10. ], - [10. , 10. ], - [10. , 10. ]], - ... - [[ 6.66666667, 6.66666667], - [ 5. , 5. ], - [ 3.33333333, 3.33333333], - [ 5. , 5. ]]]) - Coordinates: - * unit (unit) int64 16B 1 2 - * 0 (0) float64 32B 0.125 0.375 0.625 0.875 - * 1 (1) float64 16B 0.5 1.5 - Attributes: - occupancy: [[150. 150.]\\n [100. 100.]\\n [150. 150.]\\n [100. 100.]] - bin_edges: [array([0. , 0.25, 0.5 , 0.75, 1. ]), array([0., 1., 2.])] + >>> tcs = nap.compute_tuning_curves( + ... group, + ... features, + ... bins=[np.linspace(0, 1, 5), np.linspace(0, 2, 3)] + ... ) + >>> tcs + Size: 128B + array([[[10. , 10. ], + [10. , 10. ], + [10. , 10. ], + [10. , 10. ]], + ... + [[ 6.66666667, 6.66666667], + [ 5. , 5. ], + [ 3.33333333, 3.33333333], + [ 5. , 5. ]]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 32B 0.125 0.375 0.625 0.875 + * 1 (1) float64 16B 0.5 1.5 + Attributes: + occupancy: [[150. 150.]\\n [100. 100.]\\n [150. 150.]\\n [100. 100.]] + bin_edges: [array([0. , 0.25, 0.5 , 0.75, 1. ]), array([0., 1., 2.])] In all of these cases, it is also possible to pass continuous values instead of spikes (e.g. 
calcium imaging data): - >>> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) - >>> tcs = nap.compute_tuning_curves(frame, feature, bins=10) - >>> tcs - Size: 240B - array([[0.49147343, 0.50190395, 0.50971339, 0.50128013, 0.54332711, - 0.49712328, 0.49594611, 0.5110517 , 0.52247351, 0.52057658], - [0.51132036, 0.46410557, 0.47732505, 0.49830908, 0.53523019, - 0.53099429, 0.48668499, 0.44198555, 0.49222208, 0.47453398], - [0.46591801, 0.50662914, 0.46875882, 0.48734997, 0.51836574, - 0.50722266, 0.48943577, 0.49730095, 0.47944075, 0.48623693]]) - Coordinates: - * unit (unit) int64 24B 0 1 2 - * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 - Attributes: - occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] - bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... + >>> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) + >>> tcs = nap.compute_tuning_curves(frame, feature, bins=10) + >>> tcs + Size: 240B + array([[0.49147343, 0.50190395, 0.50971339, 0.50128013, 0.54332711, + 0.49712328, 0.49594611, 0.5110517 , 0.52247351, 0.52057658], + [0.51132036, 0.46410557, 0.47732505, 0.49830908, 0.53523019, + 0.53099429, 0.48668499, 0.44198555, 0.49222208, 0.47453398], + [0.46591801, 0.50662914, 0.46875882, 0.48734997, 0.51836574, + 0.50722266, 0.48943577, 0.49730095, 0.47944075, 0.48623693]]) + Coordinates: + * unit (unit) int64 24B 0 1 2 + * 0 (0) float64 80B 0.045 0.135 0.225 0.315 ... 0.585 0.675 0.765 0.855 + Attributes: + occupancy: [100. 100. 100. 100. 100. 100. 100. 100. 100. 100.] + bin_edges: [array([0. , 0.09, 0.18, 0.27, 0.36, 0.45, 0.54, 0.63, 0.72,... 
""" # check data + print(type(data)) if not isinstance(data, (nap.TsdFrame, nap.TsGroup, nap.Ts, nap.Tsd)): raise TypeError("data should be a TsdFrame, TsGroup, Ts, or Tsd.") From 0245236f7c5dd5f93894ae252477245f8151ec83 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 17:25:42 +0000 Subject: [PATCH 145/244] remove print --- pynapple/process/tuning_curves.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index b223eb506..f45f9cfae 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -168,7 +168,6 @@ def compute_tuning_curves( """ # check data - print(type(data)) if not isinstance(data, (nap.TsdFrame, nap.TsGroup, nap.Ts, nap.Tsd)): raise TypeError("data should be a TsdFrame, TsGroup, Ts, or Tsd.") From 660e9e1e96146d41f47ab5ff51b0973edae29547 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 17:51:09 +0000 Subject: [PATCH 146/244] fix link --- doc/user_guide/06_tuning_curves.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index f15857f34..54524688a 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -350,7 +350,7 @@ dict_ep = { } ``` -[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. +[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves_old.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. The output will be the mean firing rate of the neuron during this set of intervals. 
From b15d4a6dab6ffdcbfa560df8ae6e87dd91855577 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 18:05:14 +0000 Subject: [PATCH 147/244] back to merge tuning curve files --- pynapple/process/__init__.py | 5 +- pynapple/process/tuning_curves.py | 316 ++++++++++++++ pynapple/process/tuning_curves_old.py | 323 -------------- tests/test_tuning_curves.py | 233 +--------- tests/test_tuning_curves_old.py | 584 -------------------------- 5 files changed, 320 insertions(+), 1141 deletions(-) delete mode 100644 pynapple/process/tuning_curves_old.py delete mode 100644 tests/test_tuning_curves_old.py diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 7b1ce4606..58ae66af4 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -28,8 +28,7 @@ compute_mean_power_spectral_density, compute_power_spectral_density, ) -from .tuning_curves import compute_mutual_information, compute_tuning_curves -from .tuning_curves_old import ( +from .tuning_curves import ( compute_1d_mutual_info, compute_1d_tuning_curves, compute_1d_tuning_curves_continuous, @@ -37,6 +36,8 @@ compute_2d_tuning_curves, compute_2d_tuning_curves_continuous, compute_discrete_tuning_curves, + compute_mutual_information, + compute_tuning_curves, ) from .warping import build_tensor, warp_tensor from .wavelets import compute_wavelet_transform, generate_morlet_filterbank diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index f45f9cfae..2d6b2f901 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -2,7 +2,10 @@ Functions to compute n-dimensional tuning curves. 
""" +import inspect import warnings +from collections.abc import Iterable +from functools import wraps import numpy as np import pandas as pd @@ -323,3 +326,316 @@ def compute_mutual_information(tuning_curves): index=tuning_curves.coords["unit"], columns=["bits/sec", "bits/spike"], ) + + +# ===================================================================================== +# OLD FUNCTIONS, DEPRECATED +# ===================================================================================== + + +def _validate_tuning_inputs(func): + @wraps(func) + def wrapper(*args, **kwargs): + # Validate each positional argument + sig = inspect.signature(func) + kwargs = sig.bind_partial(*args, **kwargs).arguments + + if "feature" in kwargs: + if not isinstance(kwargs["feature"], (nap.Tsd, nap.TsdFrame)): + raise TypeError( + "feature should be a Tsd (or TsdFrame with 1 column only)" + ) + if ( + isinstance(kwargs["feature"], nap.TsdFrame) + and not kwargs["feature"].shape[1] == 1 + ): + raise ValueError( + "feature should be a Tsd (or TsdFrame with 1 column only)" + ) + if "features" in kwargs: + if not isinstance(kwargs["features"], nap.TsdFrame): + raise TypeError("features should be a TsdFrame with 2 columns") + if not kwargs["features"].shape[1] == 2: + raise ValueError("features should have 2 columns only.") + if "nb_bins" in kwargs: + if not isinstance(kwargs["nb_bins"], (int, tuple)): + raise TypeError( + "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." 
+ ) + if "group" in kwargs: + if not isinstance(kwargs["group"], nap.TsGroup): + raise TypeError("group should be a TsGroup.") + if "ep" in kwargs: + if not isinstance(kwargs["ep"], nap.IntervalSet): + raise TypeError("ep should be an IntervalSet") + if "minmax" in kwargs: + if not isinstance(kwargs["minmax"], Iterable): + raise TypeError("minmax should be a tuple/list of 2 numbers") + if "dict_ep" in kwargs: + if not isinstance(kwargs["dict_ep"], dict): + raise TypeError("dict_ep should be a dictionary of IntervalSet") + if not all( + isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values() + ): + raise TypeError("dict_ep argument should contain only IntervalSet.") + if "tc" in kwargs: + if not isinstance(kwargs["tc"], (pd.DataFrame, np.ndarray)): + raise TypeError( + "Argument tc should be of type pandas.DataFrame or numpy.ndarray" + ) + if "dict_tc" in kwargs: + if not isinstance(kwargs["dict_tc"], (dict, np.ndarray)): + raise TypeError( + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray." + ) + if "bitssec" in kwargs: + if not isinstance(kwargs["bitssec"], bool): + raise TypeError("Argument bitssec should be of type bool") + if "tsdframe" in kwargs: + if not isinstance(kwargs["tsdframe"], (nap.Tsd, nap.TsdFrame)): + raise TypeError("Argument tsdframe should be of type Tsd or TsdFrame.") + # Call the original function with validated inputs + return func(**kwargs) + + return wrapper + + +@_validate_tuning_inputs +def compute_discrete_tuning_curves(group, dict_ep): + """ + Compute discrete tuning curves of a TsGroup using a dictionary of epochs. + The function returns a pandas DataFrame with each row being a key of the dictionary of epochs + and each column being a neurons. + + This function can typically being used for a set of stimulus being presented for multiple epochs. + An example of the dictionary is: + + >>> dict_ep = { + ... "stim0": nap.IntervalSet(start=0, end=1), + ... "stim1":nap.IntervalSet(start=2, end=3) + ... 
} + + In this case, the function will return a pandas DataFrame : + + >>> tc + neuron0 neuron1 neuron2 + stim0 0 Hz 1 Hz 2 Hz + stim1 3 Hz 4 Hz 5 Hz + + + Parameters + ---------- + group : nap.TsGroup + The group of Ts/Tsd for which the tuning curves will be computed + dict_ep : dict + Dictionary of IntervalSets + + Returns + ------- + pandas.DataFrame + Table of firing rate for each neuron and each IntervalSet + + Raises + ------ + RuntimeError + If group is not a TsGroup object. + """ + idx = np.sort(list(dict_ep.keys())) + tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys()), data=0.0) + + for k in dict_ep.keys(): + for n in group.keys(): + tuning_curves.loc[k, n] = float(len(group[n].restrict(dict_ep[k]))) + + tuning_curves.loc[k] = tuning_curves.loc[k] / dict_ep[k].tot_length("s") + + return tuning_curves + + +@_validate_tuning_inputs +def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ + warnings.warn( + "compute_1d_tuning_curves is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + return ( + compute_tuning_curves( + group, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T + ) + + +@_validate_tuning_inputs +def compute_1d_tuning_curves_continuous( + tsdframe, feature, nb_bins, ep=None, minmax=None +): + """ + Deprecated, use `compute_tuning_curves` instead. 
+ """ + warnings.warn( + "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + return ( + compute_tuning_curves( + tsdframe, + feature, + nb_bins, + range=None if minmax is None else [minmax], + epochs=ep, + ) + .to_pandas() + .T + ) + + +@_validate_tuning_inputs +def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): + """ + Deprecated, use `compute_tuning_curves` instead. + """ + warnings.warn( + "compute_2d_tuning_curves is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + group, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins + + +@_validate_tuning_inputs +def compute_2d_tuning_curves_continuous( + tsdframe, features, nb_bins, ep=None, minmax=None +): + """ + Deprecated, use `compute_tuning_curves` instead. 
+ """ + warnings.warn( + "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" + "use compute_tuning_curves instead.", + DeprecationWarning, + stacklevel=2, + ) + xarray = compute_tuning_curves( + tsdframe, + features, + nb_bins, + range=( + None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] + ), + epochs=ep, + ) + tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} + bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] + return tcs, bins + + +@_validate_tuning_inputs +def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): + warnings.warn( + "compute_2d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if type(dict_tc) is dict: + tcs = xr.DataArray( + np.array([dict_tc[i] for i in dict_tc.keys()]), + coords={"unit": list(dict_tc.keys())}, + dims=["unit", "0", "1"], + ) + else: + tcs = xr.DataArray( + dict_tc, + coords={"unit": np.arange(len(dict_tc))}, + dims=["unit", "0", "1"], + ) + + nb_bins = (tcs.shape[1] + 1, tcs.shape[2] + 1) + bins = [] + for i in range(2): + if minmax is None: + bins.append( + np.linspace( + np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] + ) + ) + else: + bins.append( + np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) + ) + + if isinstance(ep, nap.IntervalSet): + features = features.restrict(ep) + + occupancy, _, _ = np.histogram2d( + features[:, 0].values.flatten(), + features[:, 1].values.flatten(), + [bins[0], bins[1]], + ) + occupancy = occupancy / occupancy.sum() + + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + column = "bits/sec" if bitssec else "bits/spike" + return MI[[column]].rename({column: "SI"}, axis=1) + + +@_validate_tuning_inputs +def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): + 
warnings.warn( + "compute_1d_mutual_info is deprecated and will be removed in a future version;" + "use compute_mutual_information instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(tc, pd.DataFrame): + tcs = xr.DataArray( + tc.values.T, coords={"unit": tc.columns.values, "0": tc.index} + ) + else: + tcs = xr.DataArray( + tc.T, coords={"unit": np.arange(tc.shape[1])}, dims=["unit", "0"] + ) + + nb_bins = tc.shape[0] + 1 + if minmax is None: + bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) + else: + bins = np.linspace(minmax[0], minmax[1], nb_bins) + + if isinstance(ep, nap.IntervalSet): + occupancy, _ = np.histogram(feature.restrict(ep).values, bins) + else: + occupancy, _ = np.histogram(feature.values, bins) + occupancy = occupancy / occupancy.sum() + tcs.attrs["occupancy"] = occupancy + MI = compute_mutual_information(tcs) + + column = "bits/sec" if bitssec else "bits/spike" + return MI[[column]].rename({column: "SI"}, axis=1) diff --git a/pynapple/process/tuning_curves_old.py b/pynapple/process/tuning_curves_old.py deleted file mode 100644 index 90bbf35a3..000000000 --- a/pynapple/process/tuning_curves_old.py +++ /dev/null @@ -1,323 +0,0 @@ -""" -Old functions to compute 1- and 2-dimensional tuning curves. -""" - -import inspect -import warnings -from collections.abc import Iterable -from functools import wraps - -import numpy as np -import pandas as pd -import xarray as xr - -from .. 
import core as nap -from .tuning_curves import compute_mutual_information, compute_tuning_curves - - -def _validate_tuning_inputs(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Validate each positional argument - sig = inspect.signature(func) - kwargs = sig.bind_partial(*args, **kwargs).arguments - - if "feature" in kwargs: - if not isinstance(kwargs["feature"], (nap.Tsd, nap.TsdFrame)): - raise TypeError( - "feature should be a Tsd (or TsdFrame with 1 column only)" - ) - if ( - isinstance(kwargs["feature"], nap.TsdFrame) - and not kwargs["feature"].shape[1] == 1 - ): - raise ValueError( - "feature should be a Tsd (or TsdFrame with 1 column only)" - ) - if "features" in kwargs: - if not isinstance(kwargs["features"], nap.TsdFrame): - raise TypeError("features should be a TsdFrame with 2 columns") - if not kwargs["features"].shape[1] == 2: - raise ValueError("features should have 2 columns only.") - if "nb_bins" in kwargs: - if not isinstance(kwargs["nb_bins"], (int, tuple)): - raise TypeError( - "nb_bins should be of type int (or tuple with (int, int) for 2D tuning curves)." 
- ) - if "group" in kwargs: - if not isinstance(kwargs["group"], nap.TsGroup): - raise TypeError("group should be a TsGroup.") - if "ep" in kwargs: - if not isinstance(kwargs["ep"], nap.IntervalSet): - raise TypeError("ep should be an IntervalSet") - if "minmax" in kwargs: - if not isinstance(kwargs["minmax"], Iterable): - raise TypeError("minmax should be a tuple/list of 2 numbers") - if "dict_ep" in kwargs: - if not isinstance(kwargs["dict_ep"], dict): - raise TypeError("dict_ep should be a dictionary of IntervalSet") - if not all( - isinstance(v, nap.IntervalSet) for v in kwargs["dict_ep"].values() - ): - raise TypeError("dict_ep argument should contain only IntervalSet.") - if "tc" in kwargs: - if not isinstance(kwargs["tc"], (pd.DataFrame, np.ndarray)): - raise TypeError( - "Argument tc should be of type pandas.DataFrame or numpy.ndarray" - ) - if "dict_tc" in kwargs: - if not isinstance(kwargs["dict_tc"], (dict, np.ndarray)): - raise TypeError( - "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray." - ) - if "bitssec" in kwargs: - if not isinstance(kwargs["bitssec"], bool): - raise TypeError("Argument bitssec should be of type bool") - if "tsdframe" in kwargs: - if not isinstance(kwargs["tsdframe"], (nap.Tsd, nap.TsdFrame)): - raise TypeError("Argument tsdframe should be of type Tsd or TsdFrame.") - # Call the original function with validated inputs - return func(**kwargs) - - return wrapper - - -@_validate_tuning_inputs -def compute_discrete_tuning_curves(group, dict_ep): - """ - Compute discrete tuning curves of a TsGroup using a dictionary of epochs. - The function returns a pandas DataFrame with each row being a key of the dictionary of epochs - and each column being a neurons. - - This function can typically being used for a set of stimulus being presented for multiple epochs. 
- An example of the dictionary is: - - >>> dict_ep = { - "stim0": nap.IntervalSet(start=0, end=1), - "stim1":nap.IntervalSet(start=2, end=3) - } - - In this case, the function will return a pandas DataFrame : - - >>> tc - neuron0 neuron1 neuron2 - stim0 0 Hz 1 Hz 2 Hz - stim1 3 Hz 4 Hz 5 Hz - - - Parameters - ---------- - group : nap.TsGroup - The group of Ts/Tsd for which the tuning curves will be computed - dict_ep : dict - Dictionary of IntervalSets - - Returns - ------- - pandas.DataFrame - Table of firing rate for each neuron and each IntervalSet - - Raises - ------ - RuntimeError - If group is not a TsGroup object. - """ - idx = np.sort(list(dict_ep.keys())) - tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys()), data=0.0) - - for k in dict_ep.keys(): - for n in group.keys(): - tuning_curves.loc[k, n] = float(len(group[n].restrict(dict_ep[k]))) - - tuning_curves.loc[k] = tuning_curves.loc[k] / dict_ep[k].tot_length("s") - - return tuning_curves - - -@_validate_tuning_inputs -def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): - """ - Deprecated, use `compute_tuning_curves` instead. - """ - warnings.warn( - "compute_1d_tuning_curves is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - return ( - compute_tuning_curves( - group, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - .to_pandas() - .T - ) - - -@_validate_tuning_inputs -def compute_1d_tuning_curves_continuous( - tsdframe, feature, nb_bins, ep=None, minmax=None -): - """ - Deprecated, use `compute_tuning_curves` instead. 
- """ - warnings.warn( - "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - return ( - compute_tuning_curves( - tsdframe, - feature, - nb_bins, - range=None if minmax is None else [minmax], - epochs=ep, - ) - .to_pandas() - .T - ) - - -@_validate_tuning_inputs -def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): - """ - Deprecated, use `compute_tuning_curves` instead. - """ - warnings.warn( - "compute_2d_tuning_curves is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - xarray = compute_tuning_curves( - group, - features, - nb_bins, - range=( - None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] - ), - epochs=ep, - ) - tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} - bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] - return tcs, bins - - -@_validate_tuning_inputs -def compute_2d_tuning_curves_continuous( - tsdframe, features, nb_bins, ep=None, minmax=None -): - """ - Deprecated, use `compute_tuning_curves` instead. 
- """ - warnings.warn( - "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" - "use compute_tuning_curves instead.", - DeprecationWarning, - stacklevel=2, - ) - xarray = compute_tuning_curves( - tsdframe, - features, - nb_bins, - range=( - None if minmax is None else [[minmax[0], minmax[1]], [minmax[2], minmax[3]]] - ), - epochs=ep, - ) - tcs = {c: xarray.sel(unit=c).values for c in xarray.coords["unit"].values} - bins = [xarray.coords[dim].values for dim in xarray.coords if dim != "unit"] - return tcs, bins - - -@_validate_tuning_inputs -def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): - warnings.warn( - "compute_2d_mutual_info is deprecated and will be removed in a future version;" - "use compute_mutual_information instead.", - DeprecationWarning, - stacklevel=2, - ) - if type(dict_tc) is dict: - tcs = xr.DataArray( - np.array([dict_tc[i] for i in dict_tc.keys()]), - coords={"unit": list(dict_tc.keys())}, - dims=["unit", "0", "1"], - ) - else: - tcs = xr.DataArray( - dict_tc, - coords={"unit": np.arange(len(dict_tc))}, - dims=["unit", "0", "1"], - ) - - nb_bins = (tcs.shape[1] + 1, tcs.shape[2] + 1) - bins = [] - for i in range(2): - if minmax is None: - bins.append( - np.linspace( - np.nanmin(features[:, i]), np.nanmax(features[:, i]), nb_bins[i] - ) - ) - else: - bins.append( - np.linspace(minmax[i + i % 2], minmax[i + 1 + i % 2], nb_bins[i]) - ) - - if isinstance(ep, nap.IntervalSet): - features = features.restrict(ep) - - occupancy, _, _ = np.histogram2d( - features[:, 0].values.flatten(), - features[:, 1].values.flatten(), - [bins[0], bins[1]], - ) - occupancy = occupancy / occupancy.sum() - - tcs.attrs["occupancy"] = occupancy - MI = compute_mutual_information(tcs) - - column = "bits/sec" if bitssec else "bits/spike" - return MI[[column]].rename({column: "SI"}, axis=1) - - -@_validate_tuning_inputs -def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): - 
warnings.warn( - "compute_1d_mutual_info is deprecated and will be removed in a future version;" - "use compute_mutual_information instead.", - DeprecationWarning, - stacklevel=2, - ) - if isinstance(tc, pd.DataFrame): - tcs = xr.DataArray( - tc.values.T, coords={"unit": tc.columns.values, "0": tc.index} - ) - else: - tcs = xr.DataArray( - tc.T, coords={"unit": np.arange(tc.shape[1])}, dims=["unit", "0"] - ) - - nb_bins = tc.shape[0] + 1 - if minmax is None: - bins = np.linspace(np.nanmin(feature), np.nanmax(feature), nb_bins) - else: - bins = np.linspace(minmax[0], minmax[1], nb_bins) - - if isinstance(ep, nap.IntervalSet): - occupancy, _ = np.histogram(feature.restrict(ep).values, bins) - else: - occupancy, _ = np.histogram(feature.values, bins) - occupancy = occupancy / occupancy.sum() - tcs.attrs["occupancy"] = occupancy - MI = compute_mutual_information(tcs) - - column = "bits/sec" if bitssec else "bits/spike" - return MI[[column]].rename({column: "SI"}, axis=1) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 5d580883a..332159753 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -1,4 +1,4 @@ -"""Tests of tuning curves for `pynapple` package.""" +"""Tests of N-dimensional tuning curves for `pynapple` package.""" from contextlib import nullcontext as does_not_raise @@ -814,237 +814,6 @@ def test_compute_mutual_information(n_units, n_features, pattern): pd.testing.assert_frame_equal(actual, expectation) -@pytest.mark.parametrize( - "tc, feature, ep, minmax, bitssec, expectation", - [ - ( - "a", - get_feature(), - get_ep(), - (0, 1), - True, - "Argument tc should be of type pandas.DataFrame or numpy.ndarray", - ), - ( - pd.DataFrame(), - "a", - get_ep(), - (0, 1), - True, - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", - ), - ( - pd.DataFrame(), - get_feature(), - "a", - (0, 1), - True, - r"ep should be an IntervalSet", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - 1, - True, - 
r"minmax should be a tuple\/list of 2 numbers", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_1d_mutual_info_errors(tc, feature, ep, minmax, bitssec, expectation): - with pytest.raises(TypeError, match=expectation): - nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) - - -@pytest.mark.parametrize( - "dict_tc, features, ep, minmax, bitssec, expectation", - [ - ( - "a", - get_features(), - get_ep(), - (0, 1), - True, - "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", - ), - ( - {0: np.zeros((2, 2))}, - "a", - get_ep(), - (0, 1), - True, - r"features should be a TsdFrame with 2 columns", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - "a", - (0, 1), - True, - r"ep should be an IntervalSet", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - 1, - True, - r"minmax should be a tuple\/list of 2 numbers", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_2d_mutual_info_errors( - dict_tc, features, ep, minmax, bitssec, expectation -): - with pytest.raises(TypeError, match=expectation): - nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {}, - np.array([[1.0]]), - ), - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"bitssec": True}, - np.array([[5.0]]), - ), - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"ep": nap.IntervalSet(start=0, end=49)}, - np.array([[1.0]]), - ), - ( - ( - 
pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"minmax": (0, 1)}, - np.array([[1.0]]), - ), - ( - ( - np.array([[0], [10]]), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"minmax": (0, 1)}, - np.array([[1.0]]), - ), - ], -) -def test_compute_1d_mutual_info(args, kwargs, expectation): - tc = args[0] - feature = args[1] - si = nap.compute_1d_mutual_info(tc, feature, **kwargs) - assert isinstance(si, pd.DataFrame) - assert list(si.columns) == ["SI"] - if isinstance(tc, pd.DataFrame): - assert list(si.index.values) == list(tc.columns) - np.testing.assert_approx_equal(si.values, expectation) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - np.array([[[0, 1], [0, 0]]]), - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"bitssec": True}, - np.array([[0.5]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"ep": nap.IntervalSet(start=0, end=7)}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"minmax": (0, 1, 0, 1)}, - np.array([[2.0]]), - ), - ], -) -def test_compute_2d_mutual_info(args, kwargs, expectation): - dict_tc = args[0] - features = args[1] - si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) - assert isinstance(si, pd.DataFrame) - assert 
list(si.columns) == ["SI"] - if isinstance(dict_tc, dict): - assert list(si.index.values) == list(dict_tc.keys()) - np.testing.assert_approx_equal(si.values, expectation) - - # ------------------------------------------------------------------------------------ # OLD TUNING CURVE TESTS # ------------------------------------------------------------------------------------ diff --git a/tests/test_tuning_curves_old.py b/tests/test_tuning_curves_old.py deleted file mode 100644 index 48659c930..000000000 --- a/tests/test_tuning_curves_old.py +++ /dev/null @@ -1,584 +0,0 @@ -"""Tests of old 1- and 2-dimensional tuning curves for `pynapple` package.""" - -import numpy as np -import pandas as pd -import pytest - -import pynapple as nap - - -def get_group(): - return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) - - -def get_feature(): - return nap.Tsd( - t=np.arange(0, 100, 0.1), - d=np.arange(0, 100, 0.1) % 1.0, - time_support=nap.IntervalSet(0, 100), - ) - - -def get_features(): - tmp = np.vstack( - (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) - ).T - return nap.TsdFrame( - t=np.arange(0, 200, 0.1), - d=np.vstack((tmp, tmp[::-1])), - time_support=nap.IntervalSet(0, 200), - ) - - -def get_ep(): - return nap.IntervalSet(start=0, end=50) - - -def get_tsdframe(): - return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) - - -# ------------------------------------------------------------------------------------ -# MUTUAL INFORMATION TESTS -# ------------------------------------------------------------------------------------ - - -@pytest.mark.parametrize( - "tc, feature, ep, minmax, bitssec, expectation", - [ - ( - "a", - get_feature(), - get_ep(), - (0, 1), - True, - "Argument tc should be of type pandas.DataFrame or numpy.ndarray", - ), - ( - pd.DataFrame(), - "a", - get_ep(), - (0, 1), - True, - r"feature should be a Tsd \(or TsdFrame with 1 column only\)", - ), - ( - pd.DataFrame(), - get_feature(), - "a", - (0, 1), - True, - r"ep should be an 
IntervalSet", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - 1, - True, - r"minmax should be a tuple\/list of 2 numbers", - ), - ( - pd.DataFrame(), - get_feature(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_1d_mutual_info_errors(tc, feature, ep, minmax, bitssec, expectation): - with pytest.raises(TypeError, match=expectation): - nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) - - -@pytest.mark.parametrize( - "dict_tc, features, ep, minmax, bitssec, expectation", - [ - ( - "a", - get_features(), - get_ep(), - (0, 1), - True, - "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", - ), - ( - {0: np.zeros((2, 2))}, - "a", - get_ep(), - (0, 1), - True, - r"features should be a TsdFrame with 2 columns", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - "a", - (0, 1), - True, - r"ep should be an IntervalSet", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - 1, - True, - r"minmax should be a tuple\/list of 2 numbers", - ), - ( - {0: np.zeros((2, 2))}, - get_features(), - get_ep(), - (0, 1), - "a", - r"Argument bitssec should be of type bool", - ), - ], -) -def test_compute_2d_mutual_info_errors( - dict_tc, features, ep, minmax, bitssec, expectation -): - with pytest.raises(TypeError, match=expectation): - nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {}, - np.array([[1.0]]), - ), - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"bitssec": True}, - np.array([[5.0]]), - ), - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), 
- ), - {"ep": nap.IntervalSet(start=0, end=49)}, - np.array([[1.0]]), - ), - ( - ( - pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"minmax": (0, 1)}, - np.array([[1.0]]), - ), - ( - ( - np.array([[0], [10]]), - nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), - ), - {"minmax": (0, 1)}, - np.array([[1.0]]), - ), - ], -) -def test_compute_1d_mutual_info(args, kwargs, expectation): - tc = args[0] - feature = args[1] - si = nap.compute_1d_mutual_info(tc, feature, **kwargs) - assert isinstance(si, pd.DataFrame) - assert list(si.columns) == ["SI"] - if isinstance(tc, pd.DataFrame): - assert list(si.index.values) == list(tc.columns) - np.testing.assert_approx_equal(si.values, expectation) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - np.array([[[0, 1], [0, 0]]]), - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"bitssec": True}, - np.array([[0.5]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"ep": nap.IntervalSet(start=0, end=7)}, - np.array([[2.0]]), - ), - ( - ( - {0: np.array([[0, 1], [0, 0]])}, - nap.TsdFrame( - t=np.arange(100), - d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, - ), - ), - {"minmax": (0, 1, 0, 1)}, - np.array([[2.0]]), - ), - ], -) -def test_compute_2d_mutual_info(args, kwargs, expectation): - dict_tc = args[0] - features = args[1] - si = 
nap.compute_2d_mutual_info(dict_tc, features, **kwargs) - assert isinstance(si, pd.DataFrame) - assert list(si.columns) == ["SI"] - if isinstance(dict_tc, dict): - assert list(si.index.values) == list(dict_tc.keys()) - np.testing.assert_approx_equal(si.values, expectation) - - -# ------------------------------------------------------------------------------------ -# OLD TUNING CURVE TESTS -# ------------------------------------------------------------------------------------ - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ((get_group(), get_feature(), 10), {}, np.array([10.0] + [0.0] * 9)[:, None]), - ( - (get_group(), get_feature(), 10), - {"ep": get_ep()}, - np.array([10.0] + [0.0] * 9)[:, None], - ), - ( - (get_group(), get_feature(), 10), - {"minmax": (0, 0.9)}, - np.array([10.0] + [0.0] * 9)[:, None], - ), - ( - (get_group(), get_feature(), 20), - {"minmax": (0, 1.9)}, - np.array([10.0] + [0.0] * 9 + [np.nan] * 10)[:, None], - ), - ], -) -def test_compute_1d_tuning_curves(args, kwargs, expectation): - tc = nap.compute_1d_tuning_curves(*args, **kwargs) - # Columns - assert list(tc.columns) == list(args[0].keys()) - - # Index - assert len(tc) == args[2] - if "minmax" in kwargs: - tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], args[2] + 1) - else: - tmp = np.linspace(np.min(args[1]), np.max(args[1]), args[2] + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) - - # Array - np.testing.assert_almost_equal(tc.values, expectation) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ((get_group(), get_features(), 10), {}, np.ones((10, 10)) * 0.5), - ((get_group(), get_features(), (10, 10)), {}, np.ones((10, 10)) * 0.5), - ( - (get_group(), get_features(), 10), - {"ep": nap.IntervalSet(0, 400)}, - np.ones((10, 10)) * 0.5, - ), - ( - (get_group(), get_features(), 10), - {"minmax": (0, 100, 0, 100)}, - 
np.ones((10, 10)) * 0.5, - ), - ( - (get_group(), get_features(), 10), - {"minmax": (0, 200, 0, 100)}, - np.vstack((np.ones((5, 10)) * 0.5, np.ones((5, 10)) * np.nan)), - ), - ], -) -def test_compute_2d_tuning_curves(args, kwargs, expectation): - tc, xy = nap.compute_2d_tuning_curves(*args, **kwargs) - assert isinstance(tc, dict) - - # Keys - assert list(tc.keys()) == list(args[0].keys()) - - # Index - assert isinstance(xy, list) - assert len(xy) == 2 - nb_bins = args[2] - if isinstance(args[2], int): - nb_bins = (args[2], args[2]) - if "minmax" in kwargs: - tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) - tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) - else: - tmp1 = np.linspace(np.min(args[1][:, 0]), np.max(args[1][:, 0]), nb_bins[0] + 1) - tmp2 = np.linspace(np.min(args[1][:, 1]), np.max(args[1][:, 1]), nb_bins[1] + 1) - - np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) - - # Values - for i in tc.keys(): - assert tc[i].shape == nb_bins - np.testing.assert_almost_equal(tc[i], expectation) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "args, kwargs, expectation", - [ - ( - (get_tsdframe(), get_feature(), 10), - {}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature()[:, np.newaxis], 10), - {}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe()[:, 0], get_feature(), 10), - {}, - np.vstack((np.ones((1, 1)), np.zeros((9, 1)))), - ), - ( - (get_tsdframe(), get_feature(), 10), - {"ep": get_ep()}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature(), 10), - {"minmax": (0, 0.9)}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)))), - ), - ( - (get_tsdframe(), get_feature(), 20), - {"minmax": (0, 1.9)}, - np.vstack((np.ones((1, 2)), np.zeros((9, 2)), np.ones((10, 2)) * np.nan)), - ), - ], -) 
-def test_compute_1d_tuning_curves_continuous(args, kwargs, expectation): - tsdframe, feature, nb_bins = args - tc = nap.compute_1d_tuning_curves_continuous(tsdframe, feature, nb_bins, **kwargs) - # Columns - if hasattr(tsdframe, "columns"): - assert list(tc.columns) == list(tsdframe.columns) - # Index - assert len(tc) == nb_bins - if "minmax" in kwargs: - tmp = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins + 1) - else: - tmp = np.linspace(np.min(feature), np.max(feature), nb_bins + 1) - np.testing.assert_almost_equal(tmp[0:-1] + np.diff(tmp) / 2, tc.index.values) - # Array - np.testing.assert_almost_equal(tc.values, expectation) - - -@pytest.mark.filterwarnings("ignore") -@pytest.mark.parametrize( - "tsdframe, nb_bins, kwargs, expectation", - [ - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - columns=["x", "y"], - ), - 2, - {}, - {"x": np.ones((2, 2)), "y": np.ones((2, 2)) * 2}, - ), - ( - nap.Tsd(t=np.arange(0, 100), d=np.hstack((np.ones((100,)) * 2))), - 2, - {}, - {0: np.ones((2, 2)) * 2}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - (1, 2), - {}, - {0: np.array([[1.0, 1.0]]), 1: np.array([[2.0, 2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {"ep": get_ep()}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - 1, - {"minmax": (0, 1, 0, 1)}, - {0: np.array([[1.0]]), 1: np.array([[2.0]])}, - ), - ( - nap.TsdFrame( - t=np.arange(0, 100), - d=np.hstack((np.ones((100, 1)), np.ones((100, 1)) * 2)), - ), - (1, 3), - {"minmax": (0, 1, 0, 3)}, - {0: np.array([[1.0, 1.0, 
np.nan]]), 1: np.array([[2.0, 2.0, np.nan]])}, - ), - ], -) -def test_compute_2d_tuning_curves_continuous(tsdframe, nb_bins, kwargs, expectation): - features = nap.TsdFrame( - t=np.arange(100), d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T - ) - tc, xy = nap.compute_2d_tuning_curves_continuous( - tsdframe, features, nb_bins, **kwargs - ) - - # Keys - if hasattr(tsdframe, "columns"): - assert list(tc.keys()) == list(tsdframe.columns) - - # Index - assert isinstance(xy, list) - assert len(xy) == 2 - if isinstance(nb_bins, int): - nb_bins = (nb_bins, nb_bins) - if "minmax" in kwargs: - tmp1 = np.linspace(kwargs["minmax"][0], kwargs["minmax"][1], nb_bins[0] + 1) - tmp2 = np.linspace(kwargs["minmax"][2], kwargs["minmax"][3], nb_bins[1] + 1) - else: - tmp1 = np.linspace( - np.min(features[:, 0]), np.max(features[:, 0]), nb_bins[0] + 1 - ) - tmp2 = np.linspace( - np.min(features[:, 1]), np.max(features[:, 1]), nb_bins[1] + 1 - ) - - np.testing.assert_almost_equal(tmp1[0:-1] + np.diff(tmp1) / 2, xy[0]) - np.testing.assert_almost_equal(tmp2[0:-1] + np.diff(tmp2) / 2, xy[1]) - - # Values - for i in tc.keys(): - assert tc[i].shape == nb_bins - np.testing.assert_almost_equal(tc[i], expectation[i]) - - -# ------------------------------------------------------------------------------------ -# DISCRETE TUNING CURVE TESTS -# ------------------------------------------------------------------------------------ - - -@pytest.mark.parametrize( - "group, dict_ep, expectation", - [ - ( - "a", - { - 0: nap.IntervalSet(start=0, end=50), - 1: nap.IntervalSet(start=50, end=100), - }, - pytest.raises(TypeError, match="group should be a TsGroup."), - ), - ( - get_group(), - "a", - pytest.raises( - TypeError, match="dict_ep should be a dictionary of IntervalSet" - ), - ), - ( - get_group(), - {0: "a", 1: nap.IntervalSet(start=50, end=100)}, - pytest.raises( - TypeError, match="dict_ep argument should contain only IntervalSet." 
- ), - ), - ], -) -def test_compute_discrete_tuning_curves_errors(group, dict_ep, expectation): - with expectation: - nap.compute_discrete_tuning_curves(group, dict_ep) - - -@pytest.mark.parametrize("group", [get_group()]) -@pytest.mark.parametrize( - "dict_ep", - [ - {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, - { - "0": nap.IntervalSet(start=0, end=50), - "1": nap.IntervalSet(start=50, end=100), - }, - ], -) -def test_compute_discrete_tuning_curves(group, dict_ep): - tc = nap.compute_discrete_tuning_curves(group, dict_ep) - assert len(tc) == 2 - assert list(tc.columns) == list(group.keys()) - assert list(tc.index.values) == list(dict_ep.keys()) - np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) - np.testing.assert_almost_equal(tc.iloc[1, 0], 1) From 00aa3b4d6cf4b00f0f54eeedaf9f7e706f0dd6b5 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 6 Oct 2025 18:07:16 +0000 Subject: [PATCH 148/244] back to merged --- doc/user_guide/06_tuning_curves.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 54524688a..f15857f34 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -350,7 +350,7 @@ dict_ep = { } ``` -[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves_old.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. +[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. The output will be the mean firing rate of the neuron during this set of intervals. 
From 29604f6b7b5b753d64e1efe7ae233ec2618ae6ae Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 10:56:08 +0000 Subject: [PATCH 149/244] docstring --- pynapple/process/tuning_curves.py | 52 ++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 2d6b2f901..2bf58222b 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -280,7 +280,7 @@ def compute_tuning_curves( def compute_mutual_information(tuning_curves): """ - Mutual information of an n-dimensional tuning curve. + Computes mutual information from n-dimensional tuning curves. Parameters ---------- @@ -289,7 +289,7 @@ def compute_mutual_information(tuning_curves): Returns ------- - pd.DataFrame + pandas.DataFrame A table containing the spatial information per unit, in both bits/sec and bits/spike. References @@ -298,26 +298,56 @@ def compute_mutual_information(tuning_curves): An information-theoretic approach to deciphering the hippocampal code. In Advances in neural information processing systems (pp. 1030-1037). + Examples + -------- + We can compute the mutual information between a variable and a set of neurons' firing from the tuning curves: + + >>> import pynapple as nap + >>> import numpy as np; np.random.seed(42) + >>> epoch = nap.IntervalSet([0, 100]) + >>> t = np.arange(0, 100, 0.01) + >>> feature = nap.Tsd(t=t, d=np.clip(t*0.01 + np.random.normal(0, 0.02, len(t)), 0, 1), time_support=epoch) + >>> group = nap.TsGroup({ + ... 1: nap.Ts(t[(feature.values >= 0.2) & (feature.values < 0.3)]), + ... 2: nap.Ts(t[(feature.values >= 0.7) & (feature.values < 0.8)]) + ... 
}, time_support=epoch) + >>> tcs = nap.compute_tuning_curves(group, feature, bins=10) + >>> tcs + Size: 160B + array([[ 0., 0., 100., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 100., 0., 0.]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * 0 (0) float64 80B 0.05 0.15 0.25 0.35 0.45 0.55 0.65 0.75 0.85 0.95 + Attributes: + occupancy: [ 985. 1009. 1014. 996. 993. 1008. 991. 1008. 999. 997.] + bin_edges: [array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])] + >>> MI = nap.compute_mutual_information(tcs) + >>> MI + bits/sec bits/spike + 1 33.480966 3.301870 + 2 33.369159 3.310432 """ if not isinstance(tuning_curves, xr.DataArray): raise TypeError( "tuning_curves should be an xr.DataArray as computed by compute_tuning_curves." ) + if "occupancy" not in tuning_curves.attrs: + raise ValueError("No occupancy found in tuning curves.") + occupancy = tuning_curves.attrs["occupancy"] + occupancy = occupancy / np.nansum(occupancy) + fx = tuning_curves.values axes = tuple(range(1, fx.ndim)) - fr_keepdims = np.nansum( - fx * tuning_curves.attrs["occupancy"], axis=axes, keepdims=True - ) + fr_keepdims = np.nansum(fx * occupancy, axis=axes, keepdims=True) fr_scalar = np.squeeze(fr_keepdims, axis=axes) with warnings.catch_warnings(): warnings.simplefilter("ignore") fxfr = fx / fr_keepdims logfx = np.log2(fxfr) logfx[~np.isfinite(logfx)] = 0.0 - MI_bits_per_sec = np.nansum( - tuning_curves.attrs["occupancy"] * fx * logfx, axis=axes - ) + MI_bits_per_sec = np.nansum(occupancy * fx * logfx, axis=axes) with np.errstate(divide="ignore", invalid="ignore"): MI_bits_per_spike = MI_bits_per_sec / fr_scalar @@ -556,6 +586,9 @@ def compute_2d_tuning_curves_continuous( @_validate_tuning_inputs def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): + """ + Deprecated, use `compute_mutual_information` instead. 
+ """ warnings.warn( "compute_2d_mutual_info is deprecated and will be removed in a future version;" "use compute_mutual_information instead.", @@ -608,6 +641,9 @@ def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=Fals @_validate_tuning_inputs def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): + """ + Deprecated, use `compute_mutual_information` instead. + """ warnings.warn( "compute_1d_mutual_info is deprecated and will be removed in a future version;" "use compute_mutual_information instead.", From 9ac48d3906e2e1ec18226fbf4503af80289b0800 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:16:20 +0000 Subject: [PATCH 150/244] use shape for latest zarr --- doc/user_guide/02_input_output.md | 2 +- pynapple/core/time_series.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/user_guide/02_input_output.md b/doc/user_guide/02_input_output.md index af12ed61d..9588ff9bb 100644 --- a/doc/user_guide/02_input_output.md +++ b/doc/user_guide/02_input_output.md @@ -212,7 +212,7 @@ It is possible to use Higher level library like [zarr](https://zarr.readthedocs. 
```{code-cell} ipython3 import zarr zarr_array = zarr.zeros((10000, 5), chunks=(1000, 5), dtype='i4') -timestep = np.arange(len(zarr_array)) +timestep = np.arange(zarr_array.shape[0]) tsdframe = nap.TsdFrame(t=timestep, d=zarr_array) ``` diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 357b0644a..cb8db87c2 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -178,10 +178,10 @@ def __init__(self, t, d, time_units="s", time_support=None, load_array=True): ) self.values = d - assert len(self.index) == len( - self.values + assert ( + len(self.index) == self.values.shape[0] ), "Length of values {} does not match length of index {}".format( - len(self.values), len(self.index) + len(self.values.shape[0]), len(self.index) ) if isinstance(time_support, IntervalSet) and len(self.index): From 91501c2bd7ee8e4a299d7689f22b01121b5a3018 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:19:31 +0000 Subject: [PATCH 151/244] remove len, dumb --- pynapple/core/time_series.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index cb8db87c2..8cc6d19f9 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -178,10 +178,10 @@ def __init__(self, t, d, time_units="s", time_support=None, load_array=True): ) self.values = d - assert ( - len(self.index) == self.values.shape[0] - ), "Length of values {} does not match length of index {}".format( - len(self.values.shape[0]), len(self.index) + assert len(self.index) == self.values.shape[0], ( + "Length of values {} does not match length of index {}".format( + self.values.shape[0], len(self.index) + ) ) if isinstance(time_support, IntervalSet) and len(self.index): @@ -1483,9 +1483,9 @@ def __init__( if c is None or len(c) != self.values.shape[1]: c = np.arange(self.values.shape[1], dtype="int") else: - assert ( - len(c) == self.values.shape[1] - ), "Number of 
columns should match the second dimension of d" + assert len(c) == self.values.shape[1], ( + "Number of columns should match the second dimension of d" + ) self.columns = pd.Index(c) self.nap_class = self.__class__.__name__ From 66e7220943aa4a6f2b85ffc56e9c3aa5f051de41 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:23:17 +0000 Subject: [PATCH 152/244] formatting --- pynapple/core/time_series.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 8cc6d19f9..2f91622b0 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -178,10 +178,10 @@ def __init__(self, t, d, time_units="s", time_support=None, load_array=True): ) self.values = d - assert len(self.index) == self.values.shape[0], ( - "Length of values {} does not match length of index {}".format( - self.values.shape[0], len(self.index) - ) + assert ( + len(self.index) == self.values.shape[0] + ), "Length of values {} does not match length of index {}".format( + self.values.shape[0], len(self.index) ) if isinstance(time_support, IntervalSet) and len(self.index): @@ -1483,9 +1483,9 @@ def __init__( if c is None or len(c) != self.values.shape[1]: c = np.arange(self.values.shape[1], dtype="int") else: - assert len(c) == self.values.shape[1], ( - "Number of columns should match the second dimension of d" - ) + assert ( + len(c) == self.values.shape[1] + ), "Number of columns should match the second dimension of d" self.columns = pd.Index(c) self.nap_class = self.__class__.__name__ From 452d4dfc11bd37e01e28333eac7d82cdbe1dd5fd Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:31:48 +0000 Subject: [PATCH 153/244] test for occupancy --- tests/test_tuning_curves.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 332159753..765852dfd 100644 --- a/tests/test_tuning_curves.py +++ 
b/tests/test_tuning_curves.py @@ -787,7 +787,13 @@ def get_testing_set(n_units=1, n_features=1, pattern="uniform"): match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", ), ), - (get_testing_set(1, 1)[0], does_not_raise()), + ( + (lambda x: (x.attrs.clear(), x)[1])(get_testing_set()[0]), + pytest.raises( + ValueError, + match="No occupancy found in tuning curves.", + ), + ), (get_testing_set(1, 2)[0], does_not_raise()), (get_testing_set(1, 3)[0], does_not_raise()), (get_testing_set(2, 1)[0], does_not_raise()), From 716b743cd1d6b25f7b3264b477024bf8bfca7304 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:46:42 +0000 Subject: [PATCH 154/244] readd old mutual info tests --- tests/test_tuning_curves.py | 238 ++++++++++++++++++++++++++++++++++++ 1 file changed, 238 insertions(+) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 765852dfd..a9c92d09c 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -820,6 +820,244 @@ def test_compute_mutual_information(n_units, n_features, pattern): pd.testing.assert_frame_equal(actual, expectation) +# ------------------------------------------------------------------------------------ +# OLD MUTUAL INFORMATION TESTS +# ------------------------------------------------------------------------------------ + + +@pytest.mark.parametrize( + "tc, feature, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_feature(), + get_ep(), + (0, 1), + True, + "Argument tc should be of type pandas.DataFrame or numpy.ndarray", + ), + ( + pd.DataFrame(), + "a", + get_ep(), + (0, 1), + True, + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + pd.DataFrame(), + get_feature(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + pd.DataFrame(), + get_feature(), + get_ep(), + (0, 1), + 
"a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_1d_mutual_info_errors( + tc, feature, ep, minmax, bitssec, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_1d_mutual_info(tc, feature, ep, minmax, bitssec) + + +@pytest.mark.parametrize( + "dict_tc, features, ep, minmax, bitssec, expected_exception", + [ + ( + "a", + get_features(), + get_ep(), + (0, 1), + True, + "Argument dict_tc should be a dictionary of numpy.ndarray or numpy.ndarray", + ), + ( + {0: np.zeros((2, 2))}, + "a", + get_ep(), + (0, 1), + True, + r"features should be a TsdFrame with 2 columns", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + "a", + (0, 1), + True, + r"ep should be an IntervalSet", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + 1, + True, + r"minmax should be a tuple\/list of 2 numbers", + ), + ( + {0: np.zeros((2, 2))}, + get_features(), + get_ep(), + (0, 1), + "a", + r"Argument bitssec should be of type bool", + ), + ], +) +def test_compute_2d_mutual_info_errors( + dict_tc, features, ep, minmax, bitssec, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_2d_mutual_info(dict_tc, features, ep, minmax, bitssec) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"bitssec": True}, + np.array([[5.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"ep": nap.IntervalSet(start=0, end=49)}, + np.array([[1.0]]), + ), + ( + ( + pd.DataFrame(index=np.arange(0, 2), data=np.array([0, 10])), + 
nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), + ), + ( + ( + np.array([[0], [10]]), + nap.Tsd(t=np.arange(100), d=np.tile(np.arange(2), 50)), + ), + {"minmax": (0, 1)}, + np.array([[1.0]]), + ), + ], +) +def test_compute_1d_mutual_info(args, kwargs, expected): + tc = args[0] + feature = args[1] + si = nap.compute_1d_mutual_info(tc, feature, **kwargs) + assert isinstance(si, pd.DataFrame) + assert list(si.columns) == ["SI"] + if isinstance(tc, pd.DataFrame): + assert list(si.index.values) == list(tc.columns) + np.testing.assert_approx_equal(si.values, expected) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "args, kwargs, expected", + [ + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + np.array([[[0, 1], [0, 0]]]), + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"bitssec": True}, + np.array([[0.5]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"ep": nap.IntervalSet(start=0, end=7)}, + np.array([[2.0]]), + ), + ( + ( + {0: np.array([[0, 1], [0, 0]])}, + nap.TsdFrame( + t=np.arange(100), + d=np.tile(np.array([[0, 0, 1, 1], [0, 1, 0, 1]]), 25).T, + ), + ), + {"minmax": (0, 1, 0, 1)}, + np.array([[2.0]]), + ), + ], +) +def test_compute_2d_mutual_info(args, kwargs, expected): + dict_tc = args[0] + features = args[1] + si = nap.compute_2d_mutual_info(dict_tc, features, **kwargs) + assert isinstance(si, pd.DataFrame) + assert list(si.columns) == ["SI"] + if isinstance(dict_tc, dict): + assert 
list(si.index.values) == list(dict_tc.keys()) + np.testing.assert_approx_equal(si.values, expected) + + # ------------------------------------------------------------------------------------ # OLD TUNING CURVE TESTS # ------------------------------------------------------------------------------------ From f976bc2c64b68f10081b881da7a8dc2ecc416087 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 7 Oct 2025 11:51:57 +0000 Subject: [PATCH 155/244] re-add old tuning curve tests --- tests/test_tuning_curves.py | 76 +++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index a9c92d09c..3572a26c7 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -1063,6 +1063,82 @@ def test_compute_2d_mutual_info(args, kwargs, expected): # ------------------------------------------------------------------------------------ +@pytest.mark.parametrize( + "group, feature, nb_bins, ep, minmax, expected_exception", + [ + ("a", get_feature(), 10, get_ep(), (0, 1), "group should be a TsGroup."), + ( + get_group(), + "a", + 10, + get_ep(), + (0, 1), + r"feature should be a Tsd \(or TsdFrame with 1 column only\)", + ), + ( + get_group(), + get_feature(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + (get_group(), get_feature(), 10, "a", (0, 1), r"ep should be an IntervalSet"), + ( + get_group(), + get_feature(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_1d_tuning_curves_errors( + group, feature, nb_bins, ep, minmax, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_1d_tuning_curves(group, feature, nb_bins, ep, minmax) + + +@pytest.mark.parametrize( + "group, features, nb_bins, ep, minmax, expected_exception", + [ + ("a", get_features(), 10, get_ep(), (0, 1), "group should be a TsGroup."), + ( + 
get_group(), + "a", + 10, + get_ep(), + (0, 1), + r"features should be a TsdFrame with 2 columns", + ), + ( + get_group(), + get_features(), + "a", + get_ep(), + (0, 1), + r"nb_bins should be of type int \(or tuple with \(int, int\) for 2D tuning curves\).", + ), + (get_group(), get_features(), 10, "a", (0, 1), r"ep should be an IntervalSet"), + ( + get_group(), + get_features(), + 10, + get_ep(), + 1, + r"minmax should be a tuple\/list of 2 numbers", + ), + ], +) +def test_compute_2d_tuning_curves_errors( + group, features, nb_bins, ep, minmax, expected_exception +): + with pytest.raises(TypeError, match=expected_exception): + nap.compute_2d_tuning_curves(group, features, nb_bins, ep, minmax) + + @pytest.mark.filterwarnings("ignore") @pytest.mark.parametrize( "args, kwargs, expectation", From bb9dd7fb04da5604b04c6041fbb1ab1271ba1523 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Tue, 14 Oct 2025 14:33:36 +0000 Subject: [PATCH 156/244] mutual information in user guide --- doc/user_guide/06_tuning_curves.md | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index f15857f34..d488f81da 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -16,7 +16,7 @@ kernelspec: With Pynapple you can easily compute n-dimensional tuning curves (for example, firing rate as a function of 1D angular direction or firing rate as a function of 2D position). It is also possible to compute average firing rate for different epochs -(for example firing rate for different epochs of stimulus presentation). +(for example, firing rate for different epochs of stimulus presentation). 
```{code-cell} ipython3 :tags: [hide-cell] @@ -31,16 +31,6 @@ sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_par xr.set_options(display_expand_attrs=False) ``` -```{code-cell} ipython3 -:tags: [hide-cell] -group = { - 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))), - 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))), - 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))), -} -tsgroup = nap.TsGroup(group) -``` - ## From timestamps or continuous activity ```{code-cell} ipython3 @@ -142,7 +132,7 @@ tuning_curves_1d.plot.line(x="feature", add_legend=False) plt.show() ``` -Internally, the `compute_tuning_curves` is calling the method [`value_from`](pynapple.Tsd.value_from) which maps timestamps to their closest values in time from a `Tsd` object. +Internally, the `compute_tuning_curves` calls the [`value_from`](pynapple.Tsd.value_from) method which maps timestamps to their closest values in time from a `Tsd` object. It is then possible to validate the tuning curves by displaying the timestamps as well as their associated values. ```{code-cell} ipython3 @@ -358,3 +348,16 @@ The output will be the mean firing rate of the neuron during this set of interva mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) print(mean_fr) ``` + +# Mutual information +Given a set of tuning curves, you can use `compute_mutual_information` to compute the mutual information between the activity of the neurons and the features, no matter what dimension. 
+ +```{code-cell} ipython3 +MI = nap.compute_mutual_information(tuning_curves_1d) +MI +``` + +```{code-cell} ipython3 +MI = nap.compute_mutual_information(tuning_curves_2d) +MI +``` From 89584307bb37b9c0dc5cf656b3b382c54a27cddb Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 16 Oct 2025 15:24:14 +0000 Subject: [PATCH 157/244] better docstring + snippet in tutorial --- doc/examples/tutorial_HD_dataset.md | 30 +++++++++++++++++++++++++---- doc/user_guide/06_tuning_curves.md | 6 ++++-- pynapple/process/tuning_curves.py | 22 +++++++++++++++++++++ 3 files changed, 52 insertions(+), 6 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 034c028f9..13f6808ef 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -19,7 +19,7 @@ The NWB file for the example is hosted on [OSF](https://osf.io/jb2gd). We show b The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000056). ```{code-cell} ipython3 -:tags: [hide-output] +:tags: [remove-output] import scipy import pandas as pd import numpy as np @@ -74,6 +74,7 @@ To plot head-direction tuning curves, we need the spike timings and the orientat These quantities are stored in the variables 'units' and 'ry'. ```{code-cell} ipython3 +:tags: [remove-output] spikes = data["units"] # Get spike timings epochs = data["epochs"] # Get the behavioural epochs (in this case, sleep and wakefulness) angle = data["ry"] # Get the tracked orientation of the animal @@ -100,8 +101,6 @@ print(spikes_adn) Let's compute some head-direction tuning curves. To do this in Pynapple, all you need is a single line of code! -Let's plot firing rate of ADn units as a function of heading direction, i.e. 
a head-direction tuning curve: - ```{code-cell} ipython3 tuning_curves = nap.compute_tuning_curves( data=spikes_adn, @@ -115,7 +114,30 @@ tuning_curves ``` The output is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. -Let's compute the preferred angle quickly as follows: +We can use `compute_mutual_information` to compute the mutual information between the activity of each unit and the head direction of the mouse: + +```{code-cell} ipython3 +MI = nap.compute_mutual_information(tuning_curves) +``` + +```{code-cell} ipython3 +:tags: [hide-input] +axs = MI.hist(sharey=True) +axs[0][0].set_ylabel("# neurons") +plt.show() +``` + +We can use this as a score to select the neurons that are most modulated by HD: + +```{code-cell} ipython3 +top_n = 20 +best_neurons = MI.sort_values(by="bits/sec", ascending=False).head(top_n).index +tuning_curves = tuning_curves.sel(unit=best_neurons).sortby("unit") +spikes_adn = spikes_adn[best_neurons] +best_neurons +``` + +We can then compute the preferred angle of every neuron quickly as follows: ```{code-cell} ipython3 pref_ang = tuning_curves.idxmax(dim="head_direction") diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index d488f81da..f3e1e3998 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -61,6 +61,7 @@ tsgroup = nap.TsGroup( time_support = nap.IntervalSet(0, 100) ) ``` +Computing tuning curves is done using [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). When computing from general time-series, mandatory arguments are: * `data`: a `TsGroup` (or single `Ts`) or TsdFrame (or single `Tsd`) containing the neural activity of one or more units. 
@@ -350,8 +351,7 @@ print(mean_fr) ``` # Mutual information -Given a set of tuning curves, you can use `compute_mutual_information` to compute the mutual information between the activity of the neurons and the features, no matter what dimension. - +Given a set of tuning curves, you can use [`compute_mutual_information`](pynapple.process.tuning_curves.compute_mutual_information) to compute the mutual information between the activity of the neurons and the features, no matter what dimension. ```{code-cell} ipython3 MI = nap.compute_mutual_information(tuning_curves_1d) MI @@ -361,3 +361,5 @@ MI MI = nap.compute_mutual_information(tuning_curves_2d) MI ``` +Take a look at the tutorial on [head direction cells](../examples/tutorial_HD_dataset.md) for a realistic example. + diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 2bf58222b..18f58a7dd 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -282,6 +282,28 @@ def compute_mutual_information(tuning_curves): """ Computes mutual information from n-dimensional tuning curves. + This function implements Skaggs et al.'s [1] metric to quantify + the information content of a neuron's firing with respect to a variable + (e.g., position), based on its tuning curve. + + The mutual information in bits per second is given by: + + .. math:: + + I_\\text{bits/s} = \\sum_x P(x) \\lambda(x) \\log_2 \\left( \\frac{\\lambda(x)}{\\bar{\\lambda}} \\right) + + where: + + - :math:`P(x)` is the probability of being in bin :math:`x` (occupancy), + - :math:`\\lambda(x)` is the firing rate of the neuron in bin :math:`x`, + - :math:`\\bar{\\lambda}` is the overall mean firing rate. + + The information per spike is computed by dividing the result by the mean firing rate: + + .. 
math:: + + I_\\text{bits/spike} = \\frac{I}{\\bar{\\lambda}} + Parameters ---------- tuning_curves : xarray.DataArray From db679a7a0538e10a5bed10aeb62edd37d21789c7 Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Thu, 16 Oct 2025 12:05:01 -0400 Subject: [PATCH 158/244] add time series in_interval method --- doc/user_guide/03_core_methods.md | 18 +++++ pynapple/core/base_class.py | 26 ++++++ pynapple/core/time_series.py | 128 ++++++++++++++++++++++++++++++ tests/test_time_series.py | 23 ++++++ 4 files changed, 195 insertions(+) diff --git a/doc/user_guide/03_core_methods.md b/doc/user_guide/03_core_methods.md index da93f9352..cd40830f5 100644 --- a/doc/user_guide/03_core_methods.md +++ b/doc/user_guide/03_core_methods.md @@ -63,6 +63,24 @@ print(epochs) print(tsdframe.restrict(epochs).time_support) ``` +### `in_interval` + +[`in_interval`](pynapple.Tsd.in_interval) is similar to [`restrict`](pynapple.Tsd.restrict), except instead of returning the restricted time series, it returns a `Tsd` of booleans for each time point indicating whether or not it falls within the intervals of an `IntervalSet`. + +```{code-cell} ipython3 +tsdframe.in_interval(epochs) +``` +```{code-cell} ipython3 +:tags: [hide-input] +plt.figure() +plt.plot(tsdframe.in_interval(epochs)) +plt.xlabel("Time (s)") +plt.title("tsdframe.in_interval(epochs)") +plt.xlim(0, 100) +plt.show() +``` + + ### `count` [`count`](pynapple.Tsd.count) returns the number of timestamps within bins or epochs of an `IntervalSet` object. 
diff --git a/pynapple/core/base_class.py b/pynapple/core/base_class.py index 303fc5c3a..0cceb5461 100644 --- a/pynapple/core/base_class.py +++ b/pynapple/core/base_class.py @@ -362,6 +362,32 @@ def restrict(self, iset): data = None if not hasattr(self, "values") else self.values[idx] return self._define_instance(time_array[idx], iset, values=data) + def in_interval(self, iset): + """ + Check which timestamps of the time series are within the intervals defined by an IntervalSet object + + Parameters + ---------- + iset : IntervalSet + the IntervalSet object + + Returns + ------- + Tsd + A Tsd of indicating which timestamps are within the intervals + """ + if not isinstance(iset, IntervalSet): + raise TypeError("Argument should be IntervalSet") + + time_array = self.index.values + starts = iset.start + ends = iset.end + + idx = _restrict(time_array, starts, ends) + mask = np.zeros_like(time_array, dtype=bool) + mask[idx] = True + return self._define_instance(time_array, self.time_support, values=mask) + def copy(self): """Copy the data, index and time support""" data = getattr(self, "values", None) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 72732c097..b52cff5bb 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -1167,6 +1167,38 @@ def restrict(self, iset): """ return _Base.restrict(self, iset) + @add_docstring("in_interval", _Base) + def in_interval(self, iset): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> t = np.arange(100) + >>> ep = nap.IntervalSet(start=0, end=50) + >>> tsdtensor = nap.TsdTensor(t=t, d=np.random.randn(len(t), 4, 4)) + >>> tsdtensor.in_interval(ep) + Time (s) + ---------- -- + 0.0 1 + 1.0 1 + 2.0 1 + 3.0 1 + 4.0 1 + 5.0 1 + 6.0 1 + ... 
+ 93.0 0 + 94.0 0 + 95.0 0 + 96.0 0 + 97.0 0 + 98.0 0 + 99.0 0 + dtype: bool, shape: (100,) + """ + return _Base.in_interval(self, iset) + @add_docstring("value_from", _Base) def value_from(self, data, ep=None, mode="closest"): """ @@ -1961,6 +1993,38 @@ def restrict(self, iset): """ return _Base.restrict(self, iset) + @add_docstring("in_interval", _Base) + def in_interval(self, iset): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> t = np.arange(100) + >>> ep = nap.IntervalSet(start=0, end=50) + >>> tsdframe = nap.TsdFrame(t=t, d=np.random.randn(len(t), 4)) + >>> tsdframe.in_interval(ep) + Time (s) + ---------- -- + 0.0 1 + 1.0 1 + 2.0 1 + 3.0 1 + 4.0 1 + 5.0 1 + 6.0 1 + ... + 93.0 0 + 94.0 0 + 95.0 0 + 96.0 0 + 97.0 0 + 98.0 0 + 99.0 0 + dtype: bool, shape: (100,) + """ + return _Base.in_interval(self, iset) + @add_docstring("value_from", _Base) def value_from(self, data, ep=None, mode="closest"): """ @@ -2973,6 +3037,38 @@ def restrict(self, iset): """ return _Base.restrict(self, iset) + @add_docstring("in_interval", _Base) + def in_interval(self, iset): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> t = np.arange(100) + >>> ep = nap.IntervalSet(start=0, end=50) + >>> tsd = nap.Tsd(t=t, d=np.random.randn(len(t))) + >>> tsd.in_interval(ep) + Time (s) + ---------- -- + 0.0 1 + 1.0 1 + 2.0 1 + 3.0 1 + 4.0 1 + 5.0 1 + 6.0 1 + ... 
+ 93.0 0 + 94.0 0 + 95.0 0 + 96.0 0 + 97.0 0 + 98.0 0 + 99.0 0 + dtype: bool, shape: (100,) + """ + return _Base.in_interval(self, iset) + @add_docstring("value_from", _Base) def value_from(self, data, ep=None, mode="closest"): """ @@ -3598,6 +3694,38 @@ def restrict(self, iset): """ return _Base.restrict(self, iset) + @add_docstring("in_interval", _Base) + def in_interval(self, iset): + """ + Examples + -------- + >>> import pynapple as nap + >>> import numpy as np + >>> t = np.arange(100) + >>> ep = nap.IntervalSet(start=0, end=50) + >>> ts = nap.Ts(t) + >>> ts.in_interval(ep) + Time (s) + ---------- -- + 0.0 1 + 1.0 1 + 2.0 1 + 3.0 1 + 4.0 1 + 5.0 1 + 6.0 1 + ... + 93.0 0 + 94.0 0 + 95.0 0 + 96.0 0 + 97.0 0 + 98.0 0 + 99.0 0 + dtype: bool, shape: (100,) + """ + return _Base.in_interval(self, iset) + @add_docstring("value_from", _Base) def value_from(self, data, ep=None, mode="closest"): """ diff --git a/tests/test_time_series.py b/tests/test_time_series.py index 859d44f34..a2e1005c2 100755 --- a/tests/test_time_series.py +++ b/tests/test_time_series.py @@ -539,6 +539,29 @@ def test_restrict_inherit_time_support(self, tsd): np.testing.assert_approx_equal(tsd2.time_support.start[0], ep.start[0]) np.testing.assert_approx_equal(tsd2.time_support.end[0], ep.end[0]) + @pytest.mark.parametrize( + "ep, true_inds, false_inds", + [ + (nap.IntervalSet(start=0, end=50), np.arange(51), np.arange(51, 100)), + ( + nap.IntervalSet(start=[0, 20], end=[10, 30]), + np.hstack((np.arange(11), np.arange(20, 31))), + np.hstack((np.arange(11, 20), np.arange(31, 100))), + ), + ], + ) + def test_in_interval(self, tsd, ep, true_inds, false_inds): + tsd2 = tsd.in_interval(ep) + assert isinstance(tsd2, nap.Tsd) + assert all(tsd2.time_support.start == tsd.time_support.start) + assert all(tsd2.time_support.end == tsd.time_support.end) + assert all(tsd2[true_inds] == True) + assert all(tsd2[false_inds] == False) + + def test_in_interval_error(self, tsd): + with pytest.raises(TypeError, 
match=r"Argument should be IntervalSet"): + tsd.in_interval([0, 1]) + def test_get_interval(self, tsd): tsd2 = tsd.get(10, 20) assert len(tsd2) == 11 From bd80f232ea29501c580c5400d3546e49f57ca165 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 17 Oct 2025 15:16:40 +0000 Subject: [PATCH 159/244] refactor discrete tuning curves to use xarray + allow for TsdFrame input --- doc/user_guide/06_tuning_curves.md | 12 +- pynapple/process/tuning_curves.py | 157 ++++++++++------ tests/test_tuning_curves.py | 281 +++++++++++++++++++++++------ 3 files changed, 330 insertions(+), 120 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index f3e1e3998..51be9ea7c 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -335,19 +335,19 @@ plt.show() When computing from epochs, you should store them in a dictionary: ```{code-cell} ipython3 -dict_ep = { +epochs_dict = { "stim0": nap.IntervalSet(start=0, end=20), "stim1":nap.IntervalSet(start=30, end=70) } ``` +You can then compute the tuning curves using [`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves). +You can pass either a `TsGroup` for spikes, or a `TsdFrame` for rates/calcium activity. -[`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves) takes a `TsGroup` for spiking activity and a dictionary of epochs. -The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet`. -The output will be the mean firing rate of the neuron during this set of intervals. 
+The output is an `xarray.DataArray` with labeled dimensions: ```{code-cell} ipython3 -mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep) -print(mean_fr) +tuning_curves = nap.compute_discrete_tuning_curves(tsgroup, epochs_dict) +tuning_curves ``` # Mutual information diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 18f58a7dd..3270d7601 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -150,7 +150,7 @@ def compute_tuning_curves( occupancy: [[150. 150.]\\n [100. 100.]\\n [150. 150.]\\n [100. 100.]] bin_edges: [array([0. , 0.25, 0.5 , 0.75, 1. ]), array([0., 1., 2.])] - In all of these cases, it is also possible to pass continuous values instead of spikes (e.g. calcium imaging data): + In all of these cases, it is also possible to pass continuous values instead of spikes (e.g. calcium imaging data), in that case the mean response is computed: >>> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) >>> tcs = nap.compute_tuning_curves(frame, feature, bins=10) @@ -226,7 +226,7 @@ def compute_tuning_curves( # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) - # tunning curves + # tuning curves keys = ( data.keys() if isinstance(data, nap.TsGroup) @@ -278,6 +278,107 @@ def compute_tuning_curves( return tcs +def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): + """ + Compute discrete tuning curves using a dictionary of epochs. + + Parameters + ---------- + data : TsGroup, TsdFrame, Ts, Tsd + The data for which the tuning curves will be computed. + epochs_dict : dict + Dictionary of IntervalSets. + return_pandas : bool, optional + If True, the function returns a pandas.DataFrame instead of an xarray.DataArray. + + Examples + -------- + This function is typically used for a set of discrete stimuli being presented for multiple epochs. 
+ + >>> import pynapple as nap + >>> import numpy as np; np.random.seed(42) + >>> epochs_dict = { + ... "stim0": nap.IntervalSet(start=0, end=30), + ... "stim1":nap.IntervalSet(start=60, end=90) + ... } + >>> group = nap.TsGroup({ + ... 1: nap.Ts(np.arange(0, 100, 0.1)), + ... 2: nap.Ts(np.arange(0, 100, 0.2)) + ... }) + >>> tcs = nap.compute_discrete_tuning_curves(group, epochs_dict) + >>> tcs + Size: 32B + array([[10.03333333, 10.03333333], + [ 5.03333333, 5.03333333]]) + Coordinates: + * unit (unit) int64 16B 1 2 + * epochs (epochs) >> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) + >>> tcs = nap.compute_discrete_tuning_curves(frame, epochs_dict) + >>> tcs + Size: 48B + array([[0.50946668, 0.50897635], + [0.48343249, 0.48191892], + [0.50063158, 0.48748094]]) + Coordinates: + * unit (unit) int64 24B 0 1 2 + * epochs (epochs) >> dict_ep = { - ... "stim0": nap.IntervalSet(start=0, end=1), - ... "stim1":nap.IntervalSet(start=2, end=3) - ... } - - In this case, the function will return a pandas DataFrame : - - >>> tc - neuron0 neuron1 neuron2 - stim0 0 Hz 1 Hz 2 Hz - stim1 3 Hz 4 Hz 5 Hz - - - Parameters - ---------- - group : nap.TsGroup - The group of Ts/Tsd for which the tuning curves will be computed - dict_ep : dict - Dictionary of IntervalSets - - Returns - ------- - pandas.DataFrame - Table of firing rate for each neuron and each IntervalSet - - Raises - ------ - RuntimeError - If group is not a TsGroup object. 
- """ - idx = np.sort(list(dict_ep.keys())) - tuning_curves = pd.DataFrame(index=idx, columns=list(group.keys()), data=0.0) - - for k in dict_ep.keys(): - for n in group.keys(): - tuning_curves.loc[k, n] = float(len(group[n].restrict(dict_ep[k]))) - - tuning_curves.loc[k] = tuning_curves.loc[k] / dict_ep[k].tot_length("s") - - return tuning_curves - - @_validate_tuning_inputs def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): """ diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 3572a26c7..66f499cec 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -624,87 +624,217 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): # ------------------------------------------------------------------------------------ -def get_group(): - return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) - - -def get_feature(): - return nap.Tsd( - t=np.arange(0, 100, 0.1), - d=np.arange(0, 100, 0.1) % 1.0, - time_support=nap.IntervalSet(0, 100), - ) - - -def get_features(): - tmp = np.vstack( - (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) - ).T - return nap.TsdFrame( - t=np.arange(0, 200, 0.1), - d=np.vstack((tmp, tmp[::-1])), - time_support=nap.IntervalSet(0, 200), - ) - - -def get_ep(): - return nap.IntervalSet(start=0, end=50) - - -def get_tsdframe(): - return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) - - @pytest.mark.parametrize( - "group, dict_ep, expectation", + "data, epochs_dict, kwargs, expectation", [ + # data ( - "a", - { - 0: nap.IntervalSet(start=0, end=50), - 1: nap.IntervalSet(start=50, end=100), - }, - pytest.raises(TypeError, match="group should be a TsGroup."), + [1], + {}, + {}, + pytest.raises( + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." 
+ ), ), ( - get_group(), - "a", + None, + {}, + {}, pytest.raises( - TypeError, match="dict_ep should be a dictionary of IntervalSet" + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." ), ), ( - get_group(), - {0: "a", 1: nap.IntervalSet(start=50, end=100)}, + {1: nap.Ts([1, 2, 3])}, + {}, + {}, pytest.raises( - TypeError, match="dict_ep argument should contain only IntervalSet." + TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." ), ), + (get_group_n(1), {}, {}, does_not_raise()), + (get_group_n(3), {}, {}, does_not_raise()), + (get_group_n(1).count(0.1), {}, {}, does_not_raise()), + (get_group_n(3).count(0.1), {}, {}, does_not_raise()), + (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), {}, {}, does_not_raise()), + (nap.Ts([1, 2, 3]), {}, {}, does_not_raise()), + # epochs_dict + ( + get_group_n(1), + 1, + {}, + pytest.raises( + TypeError, match="epochs_dict should be a dictionary of IntervalSets." + ), + ), + ( + get_group_n(1), + None, + {}, + pytest.raises( + TypeError, match="epochs_dict should be a dictionary of IntervalSets." + ), + ), + ( + get_group_n(1), + nap.IntervalSet(0, 100), + {}, + pytest.raises( + TypeError, match="epochs_dict should be a dictionary of IntervalSets." + ), + ), + ( + get_group_n(1), + [nap.IntervalSet(0, 100)], + {}, + pytest.raises( + TypeError, match="epochs_dict should be a dictionary of IntervalSets." + ), + ), + ( + get_group_n(1), + {"0": nap.IntervalSet(0, 100), "1": 0}, + {}, + pytest.raises( + TypeError, match="epochs_dict should be a dictionary of IntervalSets." 
+ ), + ), + ( + get_group_n(1), + {"0": nap.IntervalSet(0, 100)}, + {}, + does_not_raise(), + ), + ( + get_group_n(1), + {"0": nap.IntervalSet(0, 100), "1": nap.IntervalSet(0, 50)}, + {}, + does_not_raise(), + ), + # return pandas + ( + get_group_n(1), + {}, + {"return_pandas": 1}, + pytest.raises( + TypeError, + match="return_pandas should be a boolean.", + ), + ), + ( + get_group_n(1), + {}, + {"return_pandas": "1"}, + pytest.raises( + TypeError, + match="return_pandas should be a boolean.", + ), + ), + ( + get_group_n(1), + {}, + {"return_pandas": True}, + does_not_raise(), + ), ], ) -def test_compute_discrete_tuning_curves_errors(group, dict_ep, expectation): +def test_compute_discrete_tuning_curves_type_errors( + data, epochs_dict, kwargs, expectation +): with expectation: - nap.compute_discrete_tuning_curves(group, dict_ep) + nap.compute_discrete_tuning_curves(data, epochs_dict, **kwargs) -@pytest.mark.parametrize("group", [get_group()]) @pytest.mark.parametrize( - "dict_ep", + "data, epochs_dict, kwargs, expectation", [ - {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, - { - "0": nap.IntervalSet(start=0, end=50), - "1": nap.IntervalSet(start=50, end=100), - }, + # single rate unit, single epoch + ( + get_group_n(1).count(1.0), + {"0": nap.IntervalSet(0, 50)}, + {}, + xr.DataArray( + [[10.0]], + dims=["unit", "epochs"], + coords={"unit": [1], "epochs": ["0"]}, + ), + ), + # two rate units, single epoch + ( + get_group_n(2).count(1.0), + {"0": nap.IntervalSet(0, 50)}, + {}, + xr.DataArray( + [[10.0], [1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0"]}, + ), + ), + # two rate units, multiple epochs + ( + get_group_n(2).count(1.0), + {"0": nap.IntervalSet(0, 50), "1": nap.IntervalSet(50, 100)}, + {}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), + # single unit, single epoch + ( + get_group_n(1), + {"0": 
nap.IntervalSet(50, 100)}, + {}, + xr.DataArray( + [[10.0]], + dims=["unit", "epochs"], + coords={"unit": [1], "epochs": ["0"]}, + ), + ), + # two units, single epoch + ( + get_group_n(2), + {"0": nap.IntervalSet(0, 100)}, + {}, + xr.DataArray( + [[10.0], [1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0"]}, + ), + ), + # two units, multiple epochs + ( + get_group_n(2), + {"0": nap.IntervalSet(0, 100), "1": nap.IntervalSet(50, 100)}, + {}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), + # two units, multiple epochs, return_pandas=True + ( + get_group_n(2), + {"0": nap.IntervalSet(0, 100), "1": nap.IntervalSet(50, 100)}, + {"return_pandas": True}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ) + .to_pandas() + .T, + ), ], ) -def test_compute_discrete_tuning_curves(group, dict_ep): - tc = nap.compute_discrete_tuning_curves(group, dict_ep) - assert len(tc) == 2 - assert list(tc.columns) == list(group.keys()) - assert list(tc.index.values) == list(dict_ep.keys()) - np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) - np.testing.assert_almost_equal(tc.iloc[1, 0], 1) +def test_compute_discrete_tuning_curves(data, epochs_dict, kwargs, expectation): + tcs = nap.compute_discrete_tuning_curves(data, epochs_dict, **kwargs) + if isinstance(expectation, pd.DataFrame): + pd.testing.assert_frame_equal(tcs, expectation) + else: + xr.testing.assert_allclose(tcs, expectation) # ------------------------------------------------------------------------------------ @@ -825,6 +955,37 @@ def test_compute_mutual_information(n_units, n_features, pattern): # ------------------------------------------------------------------------------------ +def get_group(): + return nap.TsGroup({0: nap.Ts(t=np.arange(0, 100))}) + + +def get_feature(): + return nap.Tsd( + t=np.arange(0, 100, 0.1), + d=np.arange(0, 
100, 0.1) % 1.0, + time_support=nap.IntervalSet(0, 100), + ) + + +def get_features(): + tmp = np.vstack( + (np.repeat(np.arange(0, 100), 10), np.tile(np.arange(0, 100), 10)) + ).T + return nap.TsdFrame( + t=np.arange(0, 200, 0.1), + d=np.vstack((tmp, tmp[::-1])), + time_support=nap.IntervalSet(0, 200), + ) + + +def get_ep(): + return nap.IntervalSet(start=0, end=50) + + +def get_tsdframe(): + return nap.TsdFrame(t=np.arange(0, 100), d=np.ones((100, 2))) + + @pytest.mark.parametrize( "tc, feature, ep, minmax, bitssec, expected_exception", [ From 5f53834db8f69151a9902432ac906ffcdb92ad6b Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 17 Oct 2025 15:31:16 +0000 Subject: [PATCH 160/244] add intersphinx setup --- doc/conf.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 7d3570729..bac03ca7e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -22,7 +22,7 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "pynapple" -copyright = f'2021-{time.strftime("%Y")}' +copyright = f"2021-{time.strftime('%Y')}" author = "Guillaume Viejo" from importlib.metadata import version @@ -42,6 +42,7 @@ "sphinx.ext.coverage", "sphinx.ext.viewcode", # Links to source code "sphinx.ext.doctest", + "sphinx.ext.intersphinx", "sphinx_copybutton", # Adds copy button to code blocks "sphinx_design", # For layout components "myst_nb", @@ -75,6 +76,15 @@ "show-inheritance": True, } +# Intersphinx setup +intersphinx_mapping = { + "matplotlib": ("https://matplotlib.org/stable/", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "python": ("https://docs.python.org/3/", None), + "pandas": ("https://pandas.pydata.org/docs/", None), + "xarray": ("https://docs.xarray.dev/en/stable/", None), +} + # apidoc_module_dir = '../pynapple' # apidoc_output_dir = 'reference' # apidoc_excluded_paths = ['tests'] From fb65e4dec2903d3414cecdc8c1e2fc48a9980518 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: 
Fri, 17 Oct 2025 16:15:09 +0000 Subject: [PATCH 161/244] add link to skaggs for mutual info + test with more than 1 unit --- doc/user_guide/06_tuning_curves.md | 3 +++ tests/test_tuning_curves.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index f3e1e3998..4357b8cd3 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -352,6 +352,8 @@ print(mean_fr) # Mutual information Given a set of tuning curves, you can use [`compute_mutual_information`](pynapple.process.tuning_curves.compute_mutual_information) to compute the mutual information between the activity of the neurons and the features, no matter what dimension. +See the [Skaggs et al. (1992)](https://proceedings.neurips.cc/paper/1992/hash/5dd9db5e033da9c6fb5ba83c7a7ebea9-Abstract.html) paper for more information on what mutual information computes. + ```{code-cell} ipython3 MI = nap.compute_mutual_information(tuning_curves_1d) MI @@ -363,3 +365,4 @@ MI ``` Take a look at the tutorial on [head direction cells](../examples/tutorial_HD_dataset.md) for a realistic example. 
+ diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 3572a26c7..93ee28096 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -808,7 +808,7 @@ def test_compute_mutual_information_errors(tuning_curves, expectation): @pytest.mark.parametrize( "n_units, n_features", - [(1, 1), (1, 2), (1, 3)], + [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)], ) @pytest.mark.parametrize( "pattern", From 255d49b95096c37ee9d6a0fc36e6c3729ead0a0b Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Fri, 17 Oct 2025 15:01:10 -0400 Subject: [PATCH 162/244] Small changes --- doc/examples/tutorial_HD_dataset.md | 7 +++++++ doc/user_guide/06_tuning_curves.md | 2 +- pynapple/process/tuning_curves.py | 8 +++++--- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md index 13f6808ef..f39572bc5 100644 --- a/doc/examples/tutorial_HD_dataset.md +++ b/doc/examples/tutorial_HD_dataset.md @@ -114,6 +114,11 @@ tuning_curves ``` The output is an `xarray.DataArray` with one dimension representing units, and another for head-direction angles. + +*** +Computing information and selecting HD cells +------------------ + We can use `compute_mutual_information` to compute the mutual information between the activity of each unit and the head direction of the mouse: ```{code-cell} ipython3 @@ -295,6 +300,8 @@ I hope this tutorial was helpful. If you have any questions, comments or suggest :::{card} Authors ^^^ +Wolf de Wulf + Dhruv Mehrotra Guillaume Viejo diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 4357b8cd3..5177efe4f 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -64,7 +64,7 @@ tsgroup = nap.TsGroup( Computing tuning curves is done using [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). 
When computing from general time-series, mandatory arguments are: -* `data`: a `TsGroup` (or single `Ts`) or TsdFrame (or single `Tsd`) containing the neural activity of one or more units. +* `data`: a `TsGroup` (or single `Ts`) or `TsdFrame` (or single `Tsd`) containing the neural activity of one or more units. * `features`: a `Tsd` or `TsdFrame` containing one or more features. By default, 10 bins are used for all features, but you can specify the number of bins, diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 18f58a7dd..abec2caf7 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -30,9 +30,11 @@ def compute_tuning_curves( Parameters ---------- data : TsGroup, TsdFrame, Ts, Tsd - The data for which the tuning curves will be computed. + The data for which the tuning curves will be computed. This usually corresponds to the activity of the + neurons, either as spike times (TsGroup or Ts) or continuous values (TsdFrame or Tsd). features : Tsd, TsdFrame - The features (i.e. one column per feature). + The features (i.e. one column per feature). This usually corresponds to behavioral variables such as + position, head direction, speed, etc. bins : sequence or int The bin specification: @@ -307,7 +309,7 @@ def compute_mutual_information(tuning_curves): Parameters ---------- tuning_curves : xarray.DataArray - As outputted by `compute_tuning_curves`. + As computed by `compute_tuning_curves`. 
Returns ------- From 1e1c31d3012181c75ffc3e1ea94ad2541909ece3 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Fri, 17 Oct 2025 16:40:20 -0400 Subject: [PATCH 163/244] Update docs --- doc/user_guide/03_core_methods.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/user_guide/03_core_methods.md b/doc/user_guide/03_core_methods.md index cd40830f5..750a97809 100644 --- a/doc/user_guide/03_core_methods.md +++ b/doc/user_guide/03_core_methods.md @@ -73,10 +73,16 @@ tsdframe.in_interval(epochs) ```{code-cell} ipython3 :tags: [hide-input] plt.figure() +plt.subplot(2,1,1) +plt.plot(tsdframe) +[plt.axvspan(s, e, alpha=0.2) for s, e in epochs.values] +plt.xlim(0, 100) +plt.subplot(2,1,2) plt.plot(tsdframe.in_interval(epochs)) plt.xlabel("Time (s)") plt.title("tsdframe.in_interval(epochs)") plt.xlim(0, 100) +plt.tight_layout() plt.show() ``` From 58dc212069464002408c3791a68e41a139f40ce3 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 18 Oct 2025 16:47:50 +0000 Subject: [PATCH 164/244] allow 0 and 1 for return_pandas --- pynapple/process/tuning_curves.py | 4 ++-- tests/test_tuning_curves.py | 40 +++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 3270d7601..ea0a90985 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -220,7 +220,7 @@ def compute_tuning_curves( ) # check return_pandas - if not isinstance(return_pandas, bool): + if not isinstance(return_pandas, bool) or return_pandas == 1 or return_pandas == 0: raise TypeError("return_pandas should be a boolean.") # occupancy @@ -343,7 +343,7 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): raise TypeError("epochs_dict should be a dictionary of IntervalSets.") # check return_pandas - if not isinstance(return_pandas, bool): + if not isinstance(return_pandas, bool) or return_pandas == 1 or return_pandas == 0: raise 
TypeError("return_pandas should be a boolean.") # tuning curves diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 66f499cec..6cdce9367 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -263,7 +263,7 @@ def get_features_n(n, fs=10.0): ( get_group_n(1), get_features_n(1), - {"return_pandas": 1}, + {"return_pandas": 2}, pytest.raises( TypeError, match="return_pandas should be a boolean.", @@ -278,12 +278,30 @@ def get_features_n(n, fs=10.0): match="return_pandas should be a boolean.", ), ), + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": 0}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": 1}, + does_not_raise(), + ), ( get_group_n(1), get_features_n(1), {"return_pandas": True}, does_not_raise(), ), + ( + get_group_n(1), + get_features_n(1), + {"return_pandas": False}, + does_not_raise(), + ), ( get_group_n(1), get_features_n(2), @@ -715,7 +733,7 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ( get_group_n(1), {}, - {"return_pandas": 1}, + {"return_pandas": 2}, pytest.raises( TypeError, match="return_pandas should be a boolean.", @@ -736,6 +754,24 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): {"return_pandas": True}, does_not_raise(), ), + ( + get_group_n(1), + {}, + {"return_pandas": False}, + does_not_raise(), + ), + ( + get_group_n(1), + {}, + {"return_pandas": 0}, + does_not_raise(), + ), + ( + get_group_n(1), + {}, + {"return_pandas": 1}, + does_not_raise(), + ), ], ) def test_compute_discrete_tuning_curves_type_errors( From bdf5c4a6bd82ffbc33b99ae20f2daaf6ce6e0876 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 18 Oct 2025 17:00:19 +0000 Subject: [PATCH 165/244] list comp + note about overlapping epochs --- pynapple/process/tuning_curves.py | 35 ++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/pynapple/process/tuning_curves.py 
b/pynapple/process/tuning_curves.py index ea0a90985..dfca5d189 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -220,7 +220,11 @@ def compute_tuning_curves( ) # check return_pandas - if not isinstance(return_pandas, bool) or return_pandas == 1 or return_pandas == 0: + if ( + return_pandas != 1 + and return_pandas != 0 + and not isinstance(return_pandas, bool) + ): raise TypeError("return_pandas should be a boolean.") # occupancy @@ -294,6 +298,7 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): Examples -------- This function is typically used for a set of discrete stimuli being presented for multiple epochs. + The stimulus epochs can overlap, though note that epochs within an IntervalSet can not overlap. >>> import pynapple as nap >>> import numpy as np; np.random.seed(42) @@ -343,7 +348,11 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): raise TypeError("epochs_dict should be a dictionary of IntervalSets.") # check return_pandas - if not isinstance(return_pandas, bool) or return_pandas == 1 or return_pandas == 0: + if ( + return_pandas != 1 + and return_pandas != 0 + and not isinstance(return_pandas, bool) + ): raise TypeError("return_pandas should be a boolean.") # tuning curves @@ -352,22 +361,28 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): if isinstance(data, nap.TsGroup) else data.columns if isinstance(data, nap.TsdFrame) else [0] ) - tcs = np.empty((len(keys), len(epochs_dict))) if isinstance(data, (nap.TsGroup, nap.Ts)): # SPIKES if isinstance(data, nap.Ts): data = {0: data} - for epoch_idx, epoch in enumerate(epochs_dict.values()): - for unit_idx, unit_label in enumerate(keys): - tcs[unit_idx, epoch_idx] = float(len(data[unit_label].restrict(epoch))) - tcs[:, epoch_idx] = tcs[:, epoch_idx] / epoch.tot_length("s") + tcs = np.stack( + [ + data.restrict(epoch).count().values.sum(axis=0) / epoch.tot_length("s") + for epoch in 
epochs_dict.values() + ], + axis=1, + ) else: # RATES if isinstance(data, nap.Tsd): data = np.expand_dims(data.values, -1) - for epoch_idx, epoch in enumerate(epochs_dict.values()): - for unit_idx in range(len(keys)): - tcs[unit_idx, epoch_idx] = np.mean(data[:, unit_idx].restrict(epoch)) + tcs = np.stack( + [ + data.restrict(epoch).values.mean(axis=0) + for epoch in epochs_dict.values() + ], + axis=1, + ) tcs = xr.DataArray( tcs, From f289147f2be179e245becaf1440115f8fbed891d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 18 Oct 2025 17:05:15 +0000 Subject: [PATCH 166/244] fix tests --- pynapple/process/tuning_curves.py | 14 +++++++--- tests/test_tuning_curves.py | 45 ++++++++++++++++++++----------- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index dfca5d189..bc7ec1086 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -342,8 +342,10 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): raise TypeError("data should be a TsdFrame, TsGroup, Ts, or Tsd.") # check epochs_dict - if not isinstance(epochs_dict, dict) or not all( - isinstance(epoch, nap.IntervalSet) for epoch in epochs_dict.values() + if ( + not isinstance(epochs_dict, dict) + or len(epochs_dict) == 0 + or not all(isinstance(epoch, nap.IntervalSet) for epoch in epochs_dict.values()) ): raise TypeError("epochs_dict should be a dictionary of IntervalSets.") @@ -364,7 +366,7 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): if isinstance(data, (nap.TsGroup, nap.Ts)): # SPIKES if isinstance(data, nap.Ts): - data = {0: data} + data = nap.TsGroup({0: data}, time_support=data.time_support) tcs = np.stack( [ data.restrict(epoch).count().values.sum(axis=0) / epoch.tot_length("s") @@ -375,7 +377,11 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): else: # RATES if isinstance(data, nap.Tsd): - data = 
np.expand_dims(data.values, -1) + data = nap.TsdFrame( + d=np.expand_dims(data.values, -1), + t=data.times(), + time_support=data.time_support, + ) tcs = np.stack( [ data.restrict(epoch).values.mean(axis=0) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 6cdce9367..abd28174b 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -648,7 +648,7 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): # data ( [1], - {}, + {"0": nap.IntervalSet(0, 100)}, {}, pytest.raises( TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." @@ -656,7 +656,7 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ), ( None, - {}, + {"0": nap.IntervalSet(0, 100)}, {}, pytest.raises( TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." @@ -664,18 +664,33 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ), ( {1: nap.Ts([1, 2, 3])}, - {}, + {"0": nap.IntervalSet(0, 100)}, {}, pytest.raises( TypeError, match="data should be a TsdFrame, TsGroup, Ts, or Tsd." 
), ), - (get_group_n(1), {}, {}, does_not_raise()), - (get_group_n(3), {}, {}, does_not_raise()), - (get_group_n(1).count(0.1), {}, {}, does_not_raise()), - (get_group_n(3).count(0.1), {}, {}, does_not_raise()), - (nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), {}, {}, does_not_raise()), - (nap.Ts([1, 2, 3]), {}, {}, does_not_raise()), + (get_group_n(1), {"0": nap.IntervalSet(0, 100)}, {}, does_not_raise()), + (get_group_n(3), {"0": nap.IntervalSet(0, 100)}, {}, does_not_raise()), + ( + get_group_n(1).count(0.1), + {"0": nap.IntervalSet(0, 100)}, + {}, + does_not_raise(), + ), + ( + get_group_n(3).count(0.1), + {"0": nap.IntervalSet(0, 100)}, + {}, + does_not_raise(), + ), + ( + nap.Tsd(t=[1, 2, 3], d=[1, 1, 1]), + {"0": nap.IntervalSet(0, 100)}, + {}, + does_not_raise(), + ), + (nap.Ts([1, 2, 3]), {"0": nap.IntervalSet(0, 100)}, {}, does_not_raise()), # epochs_dict ( get_group_n(1), @@ -732,7 +747,7 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): # return pandas ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": 2}, pytest.raises( TypeError, @@ -741,7 +756,7 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ), ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": "1"}, pytest.raises( TypeError, @@ -750,25 +765,25 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ), ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": True}, does_not_raise(), ), ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": False}, does_not_raise(), ), ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": 0}, does_not_raise(), ), ( get_group_n(1), - {}, + {"0": nap.IntervalSet(0, 100)}, {"return_pandas": 1}, does_not_raise(), ), From e35980a7c240dc76257a62050f35a78c5e0177c3 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Sat, 18 Oct 2025 17:15:20 +0000 Subject: [PATCH 167/244] more tests for discrete epochs --- 
tests/test_tuning_curves.py | 50 +++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index abd28174b..eb290f879 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -832,6 +832,31 @@ def test_compute_discrete_tuning_curves_type_errors( coords={"unit": [1, 2], "epochs": ["0", "1"]}, ), ), + # two rate units, multiple epochs, overlapping + ( + get_group_n(2).count(1.0), + {"0": nap.IntervalSet(0, 100), "1": nap.IntervalSet(50, 100)}, + {}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), + # two rate units, multiple epochs, multiple intervals + ( + get_group_n(2).count(1.0), + { + "0": nap.IntervalSet([0, 20], [10, 30]), + "1": nap.IntervalSet([50, 70], [60, 80]), + }, + {}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), # single unit, single epoch ( get_group_n(1), @@ -855,6 +880,17 @@ def test_compute_discrete_tuning_curves_type_errors( ), ), # two units, multiple epochs + ( + get_group_n(2), + {"0": nap.IntervalSet(0, 49.9999), "1": nap.IntervalSet(50, 100)}, + {}, + xr.DataArray( + [[10.0, 10.0], [1.0, 1.0]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), + # two units, multiple epochs, overlapping ( get_group_n(2), {"0": nap.IntervalSet(0, 100), "1": nap.IntervalSet(50, 100)}, @@ -865,6 +901,20 @@ def test_compute_discrete_tuning_curves_type_errors( coords={"unit": [1, 2], "epochs": ["0", "1"]}, ), ), + # two units, multiple epochs, multiple intervals + ( + get_group_n(2), + { + "0": nap.IntervalSet([0, 20], [10, 30]), + "1": nap.IntervalSet([50, 70], [60, 80]), + }, + {}, + xr.DataArray( + [[10.1, 10.1], [1.1, 1.1]], + dims=["unit", "epochs"], + coords={"unit": [1, 2], "epochs": ["0", "1"]}, + ), + ), # two units, multiple epochs, 
return_pandas=True ( get_group_n(2), From 8409490ebfc868ecd61687675a7f90d1cb126467 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 14:47:21 +0000 Subject: [PATCH 168/244] return_counts argument --- pynapple/process/tuning_curves.py | 17 +++++- tests/test_tuning_curves.py | 87 ++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index abec2caf7..52dc3de0e 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -23,6 +23,7 @@ def compute_tuning_curves( fs=None, feature_names=None, return_pandas=False, + return_counts=False, ): """ Computes n-dimensional tuning curves relative to n features. @@ -61,6 +62,11 @@ def compute_tuning_curves( If True, the function returns a pandas.DataFrame instead of an xarray.DataArray. Note that this will not work if the features are not 1D and that occupancy and bin edges will not be stored as attributes. + return_counts : bool, optional + If True, does not divide the spike counts by occupancy, but returns the counts directly. + The occupancy is stored in the xarray attributes, so the division can be performed after any + particular processing steps. + If the input is a TsdFrame, this does not do anything. 
Returns ------- @@ -225,6 +231,14 @@ def compute_tuning_curves( if not isinstance(return_pandas, bool): raise TypeError("return_pandas should be a boolean.") + # check return_counts + if ( + return_counts != 1 + and return_counts != 0 + and not isinstance(return_counts, bool) + ): + raise TypeError("return_counts should be a boolean.") + # occupancy occupancy, bin_edges = np.histogramdd(features, bins=bins, range=range) @@ -245,7 +259,8 @@ def compute_tuning_curves( bins=bin_edges, )[0] occupancy[occupancy == 0.0] = np.nan - tcs = (tcs / occupancy) * fs + if not return_counts: + tcs = (tcs / occupancy) * fs else: # RATES values = data.value_from(features, epochs) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 93ee28096..59532608a 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -259,7 +259,7 @@ def get_features_n(n, fs=10.0): {"feature_names": ("feature0", "feature1")}, does_not_raise(), ), - # return pandas + # return_pandas ( get_group_n(1), get_features_n(1), @@ -293,6 +293,49 @@ def get_features_n(n, fs=10.0): match="Cannot convert arrays with 3 dimensions into pandas objects. 
Requires 2 or fewer dimensions.", ), ), + # return_counts + ( + get_group_n(1), + get_features_n(1), + {"return_counts": 2}, + pytest.raises( + TypeError, + match="return_counts should be a boolean.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_counts": "1"}, + pytest.raises( + TypeError, + match="return_counts should be a boolean.", + ), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_counts": 0}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_counts": 1}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_counts": True}, + does_not_raise(), + ), + ( + get_group_n(1), + get_features_n(1), + {"return_counts": False}, + does_not_raise(), + ), ], ) def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): @@ -609,6 +652,48 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): .to_pandas() .T, ), + # single unit, single feature, return_counts=True + ( + get_group_n(1), + get_features_n(1), + {"return_counts": True}, + xr.DataArray( + np.full((1, 10), 100.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # single unit, single feature, return_counts=True + ( + get_group_n(1), + get_features_n(1), + {"return_counts": True}, + xr.DataArray( + np.full((1, 10), 100.0), + dims=["unit", "feature0"], + coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + ), + ), + # multiple units, multiple features, return_counts=True + ( + get_group_n(2), + get_features_n(2), + {"return_counts": True}, + xr.DataArray( + np.stack( + [ + np.where(np.eye(10), 100.0, 0.0), + np.where(np.eye(10), 10.0, 0.0), + ] + ), + dims=["unit", "feature0", "feature1"], + coords={ + "unit": [1, 2], + "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, + "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, + }, + ), + ), ], ) def test_compute_tuning_curves(data, features, kwargs, 
expectation): From 288f3bedbd95022b6e36f70799822eec0d6ac79c Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 14:49:22 +0000 Subject: [PATCH 169/244] remove duplicate test --- tests/test_tuning_curves.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 59532608a..951270a3c 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -663,17 +663,6 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, ), ), - # single unit, single feature, return_counts=True - ( - get_group_n(1), - get_features_n(1), - {"return_counts": True}, - xr.DataArray( - np.full((1, 10), 100.0), - dims=["unit", "feature0"], - coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, - ), - ), # multiple units, multiple features, return_counts=True ( get_group_n(2), From 3de8bab8b6b13b51ff2f72e1eaeb667ba54fadef Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 15:31:31 +0000 Subject: [PATCH 170/244] add fs to attributes --- pynapple/process/tuning_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 52dc3de0e..cc6f9ba9b 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -287,7 +287,7 @@ def compute_tuning_curves( for feature_name, e in zip(feature_names, bin_edges) }, }, - attrs={"occupancy": occupancy, "bin_edges": bin_edges}, + attrs={"occupancy": occupancy, "bin_edges": bin_edges, "fs": fs}, ) if return_pandas: return tcs.to_pandas().T From 5af7dcfdb4c48a1d1c8bff044100a6f1c0a4fb3d Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 15:40:15 +0000 Subject: [PATCH 171/244] rename to compute_response_per_epoch -> add deprecated funciton --- doc/user_guide/06_tuning_curves.md | 4 +- pynapple/process/__init__.py 
| 1 + pynapple/process/tuning_curves.py | 23 +++++++++-- tests/test_tuning_curves.py | 64 +++++++++++++++++++++++++++--- 4 files changed, 80 insertions(+), 12 deletions(-) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 51be9ea7c..cb02dc8d0 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -340,13 +340,13 @@ epochs_dict = { "stim1":nap.IntervalSet(start=30, end=70) } ``` -You can then compute the tuning curves using [`nap.compute_discrete_tuning_curves`](pynapple.process.tuning_curves.compute_discrete_tuning_curves). +You can then compute the tuning curves using [`nap.compute_response_per_epoch`](pynapple.process.tuning_curves.compute_response_per_epoch). You can pass either a `TsGroup` for spikes, or a `TsdFrame` for rates/calcium activity. The output is an `xarray.DataArray` with labeled dimensions: ```{code-cell} ipython3 -tuning_curves = nap.compute_discrete_tuning_curves(tsgroup, epochs_dict) +tuning_curves = nap.compute_response_per_epoch(tsgroup, epochs_dict) tuning_curves ``` diff --git a/pynapple/process/__init__.py b/pynapple/process/__init__.py index 58ae66af4..3cbb079a4 100644 --- a/pynapple/process/__init__.py +++ b/pynapple/process/__init__.py @@ -37,6 +37,7 @@ compute_2d_tuning_curves_continuous, compute_discrete_tuning_curves, compute_mutual_information, + compute_response_per_epoch, compute_tuning_curves, ) from .warping import build_tensor, warp_tensor diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index bc7ec1086..37d7befed 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -282,9 +282,9 @@ def compute_tuning_curves( return tcs -def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): +def compute_response_per_epoch(data, epochs_dict, return_pandas=False): """ - Compute discrete tuning curves using a dictionary of epochs. 
+ Compute mean response per epoch, given a dictionary of epochs. Parameters ---------- @@ -310,7 +310,7 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): ... 1: nap.Ts(np.arange(0, 100, 0.1)), ... 2: nap.Ts(np.arange(0, 100, 0.2)) ... }) - >>> tcs = nap.compute_discrete_tuning_curves(group, epochs_dict) + >>> tcs = nap.compute_response_per_epoch(group, epochs_dict) >>> tcs Size: 32B array([[10.03333333, 10.03333333], @@ -322,7 +322,7 @@ def compute_discrete_tuning_curves(data, epochs_dict, return_pandas=False): You can also pass a TsdFrame (e.g. calcium imaging data), in that case the response is computed: >>> frame = nap.TsdFrame(d=np.random.rand(2000, 3), t=np.arange(0, 100, 0.05)) - >>> tcs = nap.compute_discrete_tuning_curves(frame, epochs_dict) + >>> tcs = nap.compute_response_per_epoch(frame, epochs_dict) >>> tcs Size: 48B array([[0.50946668, 0.50897635], @@ -676,6 +676,21 @@ def compute_2d_tuning_curves_continuous( return tcs, bins +@_validate_tuning_inputs +def compute_discrete_tuning_curves(group, dict_ep): + """ + Deprecated, use `compute_response_per_epoch` instead. 
+ """ + warnings.warn( + "compute_discrete_tuning_curves is deprecated and will be removed in a future version;" + "use compute_response_per_epoch instead.", + DeprecationWarning, + stacklevel=2, + ) + + return compute_response_per_epoch(group, dict_ep, return_pandas=True) + + @_validate_tuning_inputs def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): """ diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index eb290f879..a36e85224 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -789,11 +789,9 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): ), ], ) -def test_compute_discrete_tuning_curves_type_errors( - data, epochs_dict, kwargs, expectation -): +def test_compute_response_per_epoch_type_errors(data, epochs_dict, kwargs, expectation): with expectation: - nap.compute_discrete_tuning_curves(data, epochs_dict, **kwargs) + nap.compute_response_per_epoch(data, epochs_dict, **kwargs) @pytest.mark.parametrize( @@ -930,8 +928,8 @@ def test_compute_discrete_tuning_curves_type_errors( ), ], ) -def test_compute_discrete_tuning_curves(data, epochs_dict, kwargs, expectation): - tcs = nap.compute_discrete_tuning_curves(data, epochs_dict, **kwargs) +def test_compute_response_per_epoch(data, epochs_dict, kwargs, expectation): + tcs = nap.compute_response_per_epoch(data, epochs_dict, **kwargs) if isinstance(expectation, pd.DataFrame): pd.testing.assert_frame_equal(tcs, expectation) else: @@ -1325,6 +1323,60 @@ def test_compute_2d_mutual_info(args, kwargs, expected): # ------------------------------------------------------------------------------------ +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize( + "group, dict_ep, expectation", + [ + ( + "a", + { + 0: nap.IntervalSet(start=0, end=50), + 1: nap.IntervalSet(start=50, end=100), + }, + pytest.raises(TypeError, match="group should be a TsGroup."), + ), + ( + get_group(), + "a", + pytest.raises( + TypeError, 
match="dict_ep should be a dictionary of IntervalSet" + ), + ), + ( + get_group(), + {0: "a", 1: nap.IntervalSet(start=50, end=100)}, + pytest.raises( + TypeError, match="dict_ep argument should contain only IntervalSet." + ), + ), + ], +) +def test_compute_discrete_tuning_curves_errors(group, dict_ep, expectation): + with expectation: + nap.compute_discrete_tuning_curves(group, dict_ep) + + +@pytest.mark.filterwarnings("ignore") +@pytest.mark.parametrize("group", [get_group()]) +@pytest.mark.parametrize( + "dict_ep", + [ + {0: nap.IntervalSet(start=0, end=50), 1: nap.IntervalSet(start=50, end=100)}, + { + "0": nap.IntervalSet(start=0, end=50), + "1": nap.IntervalSet(start=50, end=100), + }, + ], +) +def test_compute_discrete_tuning_curves(group, dict_ep): + tc = nap.compute_discrete_tuning_curves(group, dict_ep) + assert len(tc) == 2 + assert list(tc.columns) == list(group.keys()) + assert list(tc.index.values) == list(dict_ep.keys()) + np.testing.assert_almost_equal(tc.iloc[0, 0], 51 / 50) + np.testing.assert_almost_equal(tc.iloc[1, 0], 1) + + @pytest.mark.parametrize( "group, feature, nb_bins, ep, minmax, expected_exception", [ From 24b504cf7707a021425a8401577f02d14ad85922 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 12:39:08 -0400 Subject: [PATCH 172/244] Fix 504 issue --- pynapple/core/time_series.py | 17 ++++++++++++ tests/test_numpy_compatibility.py | 44 +++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index b0c66f538..913374c96 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -16,6 +16,7 @@ import abc import importlib +import inspect import warnings from numbers import Number @@ -303,6 +304,22 @@ def __array_function__(self, func, types, args, kwargs): new_args.append(a) out = func._implementation(*new_args, **kwargs) + + if func in [np.transpose]: + return out + + # Special case for array with symmetrical shapes 
and some functions like sum + if self.ndim > 1 and np.all(self.shape[0] == np.array(self.shape)): + # The output should have fewer dimensions + if out.ndim < self.ndim: + # Need to check axis + sig = inspect.signature(func) + bound = sig.bind_partial(*new_args, **kwargs) + axis = bound.arguments.get("axis", None) + # If axis = 0, the time axis disappear so should return a numpy array + if axis == 0: + return out + return _initialize_tsd_output(self, out) def as_array(self): diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index c4323147f..05495d4d4 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -1,3 +1,5 @@ +from numbers import Number + import numpy as np import numpy.core.umath as _umath import pytest @@ -500,3 +502,45 @@ def test_concatenate(self, tsd): def test_fft(self, tsd): with pytest.raises(TypeError): np.fft.fft(tsd) + + +@pytest.mark.parametrize( + "tsd", + [ + nap.TsdFrame( + t=np.arange(10), + d=np.random.rand(10, 10), + ), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10), time_units="s"), + ], +) +@pytest.mark.parametrize( + "func, kwargs", + [ + ("sum", {}), + ("sum", {"axis": 0}), + ("sum", {"axis": 1}), + ("sum", {"axis": -1}), + ("sum", {"axis": (0, 1)}), + ], +) +def test_square_arrays(tsd, func, kwargs): + a = getattr(np, func)(tsd, **kwargs) + b = getattr(np, func)(tsd.values, **kwargs) + + if "axis" in kwargs: + axis = kwargs["axis"] + else: + axis = None + + if axis is None or np.isscalar(b): + assert np.isscalar(a) + assert a == b + else: + if axis == 0: + assert isinstance(a, (np.ndarray, Number)) + np.testing.assert_array_almost_equal(a, b) + else: + assert not isinstance(a, np.ndarray) + np.testing.assert_array_almost_equal(a.index, tsd.index) + np.testing.assert_array_almost_equal(a.values, b) From e51a917a8f85d9ec6f74c8562cc16d55fbce94c5 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 16:45:38 +0000 Subject: [PATCH 173/244] add 
tests for attributes --- tests/test_tuning_curves.py | 155 ++++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) diff --git a/tests/test_tuning_curves.py b/tests/test_tuning_curves.py index 951270a3c..05f77d1dd 100644 --- a/tests/test_tuning_curves.py +++ b/tests/test_tuning_curves.py @@ -355,6 +355,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # multiple rate units, single feature @@ -369,6 +374,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "unit": [1, 2], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, }, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # multiple rate units, multiple features @@ -389,6 +399,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, }, + attrs={ + "fs": 10.0, + "occupancy": np.where(np.eye(10), 100.0, 0.0), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 3)], + }, ), ), # single unit, single feature @@ -400,6 +415,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # multiple units, single feature @@ -414,6 +434,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "unit": [1, 2], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, }, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + 
"bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # multiple units, multiple features @@ -434,6 +459,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, }, + attrs={ + "fs": 10.0, + "occupancy": np.where(np.eye(10), 100.0, np.nan), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 3)], + }, ), ), # single unit, single feature, specified number of bins @@ -445,6 +475,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 5), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99}, + attrs={ + "fs": 10.0, + "occupancy": np.full(5, 200.0), + "bin_edges": [np.linspace(0, 9.9, 6)], + }, ), ), # single unit, multiple features, specified number of bins @@ -460,6 +495,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, "feature1": np.linspace(0, 19.8, 6)[:-1] + 1.98, }, + attrs={ + "fs": 10.0, + "occupancy": np.where(np.eye(5), 200.0, np.nan), + "bin_edges": [np.linspace(0, i * 9.9, 6) for i in range(1, 3)], + }, ), ), # single unit, multiple features, specified number of bins per feature @@ -485,6 +525,19 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 9.9, 6)[:-1] + 0.99, "feature1": np.linspace(0, 19.8, 5)[:-1] + 2.475, }, + attrs={ + "fs": 10.0, + "occupancy": np.array( + [ + [200.0, np.nan, np.nan, np.nan], + [50.0, 150.0, np.nan, np.nan], + [np.nan, 100.0, 100.0, np.nan], + [np.nan, np.nan, 150.0, 50.0], + [np.nan, np.nan, np.nan, 200.0], + ] + ), + "bin_edges": [np.linspace(0, 9.9, 6), np.linspace(0, 19.8, 5)], + }, ), ), # single unit, single feature, specified bins @@ -496,6 +549,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 5), 
10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.arange(1, 11, 2)}, + attrs={ + "fs": 10.0, + "occupancy": np.full(5, 200.0), + "bin_edges": [np.linspace(0, 10, 6)], + }, ), ), # single unit, multiple features, specified bins @@ -511,6 +569,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.arange(1, 11, 2), "feature1": np.arange(2, 22, 4), }, + attrs={ + "fs": 10.0, + "occupancy": np.where(np.eye(5), 200.0, np.nan), + "bin_edges": [np.linspace(0, i * 10, 6) for i in range(1, 3)], + }, ), ), # single unit, single feature, specified range @@ -522,6 +585,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + attrs={ + "fs": 10.0, + "occupancy": np.concatenate([np.full(9, 50.0), [60]]), + "bin_edges": [np.linspace(0, 5.0, 11)], + }, ), ), # single unit, multiple features, specified range per feature @@ -537,6 +605,16 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, }, + attrs={ + "fs": 10.0, + "occupancy": np.where( + np.eye(10), + 50.0 + + 10.0 * (np.arange(10) == 9)[:, None] * (np.arange(10) == 9), + np.nan, + ), + "bin_edges": [np.linspace(0, i * 5, 11) for i in range(1, 3)], + }, ), ), # single unit, single feature, specified range and number of bins @@ -548,6 +626,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25}, + attrs={ + "fs": 10.0, + "occupancy": np.concatenate([np.full(9, 50.0), [60]]), + "bin_edges": [np.linspace(0, 5.0, 11)], + }, ), ), # single unit, multiple features, specified range per feature and number of bins @@ -563,6 +646,16 @@ def 
test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, }, + attrs={ + "fs": 10.0, + "occupancy": np.where( + np.eye(10), + 50.0 + + 10.0 * (np.arange(10) == 9)[:, None] * (np.arange(10) == 9), + np.nan, + ), + "bin_edges": [np.linspace(0, i * 5, 11) for i in range(1, 3)], + }, ), ), # single unit, multiple features, specified range and number of bins per feature @@ -578,6 +671,16 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 5.0, 11)[:-1] + 0.25, "feature1": np.linspace(0, 10.0, 11)[:-1] + 0.5, }, + attrs={ + "fs": 10.0, + "occupancy": np.where( + np.eye(10), + 50.0 + + 10.0 * (np.arange(10) == 9)[:, None] * (np.arange(10) == 9), + np.nan, + ), + "bin_edges": [np.linspace(0, i * 5, 11) for i in range(1, 3)], + }, ), ), # single unit, single feature, specified epochs (smaller) @@ -589,6 +692,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.concatenate([[51], np.full(9, 50.0)]), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # single unit, single feature, specified epochs (larger) @@ -600,6 +708,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # single unit, single feature, specified epochs (multiple) @@ -611,6 +724,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] 
+ 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.concatenate([[42], np.full(9, 40.0)]), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # single unit, single feature, specified feature name @@ -622,6 +740,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "f0"], coords={"unit": [1], "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, 9.9, 11)], + }, ), ), # single unit, multiple features, specified feature names @@ -637,6 +760,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "f0": np.linspace(0, 9.9, 11)[:-1] + 0.495, "f1": np.linspace(0, 19.8, 11)[:-1] + 0.99, }, + attrs={ + "fs": 10.0, + "occupancy": np.where(np.eye(10), 100.0, np.nan), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 3)], + }, ), ), # single unit, single feature, return_pandas=True @@ -648,6 +776,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 10.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 2)], + }, ) .to_pandas() .T, @@ -661,6 +794,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): np.full((1, 10), 100.0), dims=["unit", "feature0"], coords={"unit": [1], "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495}, + attrs={ + "fs": 10.0, + "occupancy": np.full(10, 100.0), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 2)], + }, ), ), # multiple units, multiple features, return_counts=True @@ -681,6 +819,11 @@ def test_compute_tuning_curves_type_errors(data, features, kwargs, expectation): "feature0": np.linspace(0, 9.9, 11)[:-1] + 0.495, "feature1": np.linspace(0, 19.8, 11)[:-1] + 0.99, }, + attrs={ + "fs": 
10.0, + "occupancy": np.where(np.eye(10), 100.0, np.nan), + "bin_edges": [np.linspace(0, i * 9.9, 11) for i in range(1, 3)], + }, ), ), ], @@ -691,6 +834,18 @@ def test_compute_tuning_curves(data, features, kwargs, expectation): pd.testing.assert_frame_equal(tcs, expectation) else: xr.testing.assert_allclose(tcs, expectation) + for attribute in expectation.attrs: + assert attribute in tcs.attrs + if isinstance(expectation.attrs[attribute], (np.ndarray, float)): + print(tcs.attrs[attribute]) + np.testing.assert_array_almost_equal( + tcs.attrs[attribute], expectation.attrs[attribute] + ) + else: + for i in range(len(expectation.attrs[attribute])): + np.testing.assert_array_almost_equal( + tcs.attrs[attribute][i], expectation.attrs[attribute][i] + ) # ------------------------------------------------------------------------------------ From 482509c66bbf4f6f30945fc170a4361765bb2f7c Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 16:55:22 +0000 Subject: [PATCH 174/244] add check to jitrestrict --- pynapple/core/_jitted_functions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pynapple/core/_jitted_functions.py b/pynapple/core/_jitted_functions.py index 33057b3cf..a74233a8a 100644 --- a/pynapple/core/_jitted_functions.py +++ b/pynapple/core/_jitted_functions.py @@ -15,8 +15,10 @@ def jitrestrict(time_array, starts, ends): t = 0 x = 0 - while ends[k] < time_array[t]: + while k < m and ends[k] < time_array[t]: k += 1 + if k == m: + return np.empty(0, dtype=np.int64) while k < m: # Outside From eee568b81127b5211a5a88ba5c686e5feebe4ee8 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 20 Oct 2025 17:53:01 +0000 Subject: [PATCH 175/244] don't pass epochs to value from --- pynapple/process/tuning_curves.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index cc6f9ba9b..f3dd06d03 100644 --- a/pynapple/process/tuning_curves.py +++ 
b/pynapple/process/tuning_curves.py @@ -255,7 +255,7 @@ def compute_tuning_curves( data = {0: data} for i, n in enumerate(keys): tcs[i] = np.histogramdd( - data[n].value_from(features, epochs), + data[n].value_from(features), bins=bin_edges, )[0] occupancy[occupancy == 0.0] = np.nan @@ -263,7 +263,7 @@ def compute_tuning_curves( tcs = (tcs / occupancy) * fs else: # RATES - values = data.value_from(features, epochs) + values = data.value_from(features) if isinstance(data, nap.Tsd): data = np.expand_dims(data.values, -1) counts = np.histogramdd(values, bins=bin_edges)[0] From 681efae8cd43cad75a23a7dff786cc3f113ff5f2 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 14:20:44 -0400 Subject: [PATCH 176/244] Update tests --- pynapple/core/time_series.py | 31 ++++++++++++++++------ tests/test_numpy_compatibility.py | 43 +++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 913374c96..cde59f3c0 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -305,20 +305,35 @@ def __array_function__(self, func, types, args, kwargs): out = func._implementation(*new_args, **kwargs) - if func in [np.transpose]: - return out - # Special case for array with symmetrical shapes and some functions like sum if self.ndim > 1 and np.all(self.shape[0] == np.array(self.shape)): - # The output should have fewer dimensions - if out.ndim < self.ndim: - # Need to check axis - sig = inspect.signature(func) - bound = sig.bind_partial(*new_args, **kwargs) + # Need to check axis + sig = inspect.signature(func) + bound = sig.bind_partial(*new_args, **kwargs) + if out.ndim < self.ndim: # Case it reduces dimensions axis = bound.arguments.get("axis", None) # If axis = 0, the time axis disappear so should return a numpy array if axis == 0: return out + else: # ndim is the same + if func in [np.flip, np.flipud, np.rollaxis]: + axis = 
bound.arguments.get("axis", None) + if axis is None or axis == 0: + return out + if func is np.transpose: + axes = bound.arguments.get("axes", None) + if axes is None or (isinstance(axes, tuple) and axes[0] != 0): + return out + if func is np.moveaxis: + source = bound.arguments.get("source", None) + destination = bound.arguments.get("destination", None) + if source == 0 or destination == 0: + return out + if func is np.swapaxes: + axis1 = bound.arguments.get("axis1", None) + axis2 = bound.arguments.get("axis2", None) + if axis1 == 0 or axis2 == 0: + return out return _initialize_tsd_output(self, out) diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 05495d4d4..17f3ad010 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -544,3 +544,46 @@ def test_square_arrays(tsd, func, kwargs): assert not isinstance(a, np.ndarray) np.testing.assert_array_almost_equal(a.index, tsd.index) np.testing.assert_array_almost_equal(a.values, b) + + +@pytest.mark.parametrize( + "tsd", + [ + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10), time_units="s"), + ], +) +@pytest.mark.parametrize( + "func, kwargs, expected_type", + [ + ("transpose", {}, np.ndarray), + ("transpose", {"axes": (2, 0, 1)}, np.ndarray), + ("transpose", {"axes": (0, 2, 1)}, nap.TsdTensor), + ("moveaxis", {"source": 0, "destination": 1}, np.ndarray), + ("moveaxis", {"source": 1, "destination": 0}, np.ndarray), + ("moveaxis", {"source": 2, "destination": 1}, nap.TsdTensor), + ("swapaxes", {"axis1": 0, "axis2": 1}, np.ndarray), + ("swapaxes", {"axis1": 1, "axis2": 2}, nap.TsdTensor), + ("swapaxes", {"axis1": 2, "axis2": 0}, np.ndarray), + ("rollaxis", {"axis": 0, "start": 1}, np.ndarray), + ("rollaxis", {"axis": 1, "start": 0}, nap.TsdTensor), + ("rollaxis", {"axis": 2, "start": 0}, nap.TsdTensor), + ("flipud", {}, np.ndarray), + ("fliplr", {}, nap.TsdTensor), + ("flip", {"axis": 0}, np.ndarray), + ("flip", {"axis": 1}, 
nap.TsdTensor), + ("flip", {"axis": 2}, nap.TsdTensor), + ], +) +def test_axis_moving(tsd, func, kwargs, expected_type): + a = getattr(np, func)(tsd, **kwargs) + b = getattr(np, func)(tsd.values, **kwargs) + + assert isinstance(a, expected_type) + + if not isinstance(a, np.ndarray): + np.testing.assert_array_almost_equal(a.index, tsd.index) + + if hasattr(a, "values"): + np.testing.assert_array_almost_equal(a.values, b) + else: + np.testing.assert_array_almost_equal(a, b) From 2da00b78bf663bb0c9e07dc673a50aa1dfab64a8 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 14:39:35 -0400 Subject: [PATCH 177/244] version fix for foc --- .github/workflows/deploy.yml | 11 ++++++++++- .github/workflows/documentation.yml | 4 ++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 6fe1848ff..c2d6a6e6e 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -8,20 +8,25 @@ jobs: name: Build and test package runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - name: Checkout with full history and tags + uses: actions/checkout@v4 # this is necessary for setuptools_scm to work properly with github # actions, see https://github.com/pypa/setuptools_scm/issues/480 and # https://stackoverflow.com/a/68959339 with: fetch-depth: 0 + tags: true + - name: Set up Python uses: actions/setup-python@v5 with: python-version: 3.x + - name: Build package run: | pip install build python -m build --outdir dist/ --sdist --wheel + - name: Check there's only one sdist and one whl file created shell: bash # because the following two tests will be weird otherwise. 
see @@ -31,6 +36,7 @@ jobs: run: | [[ $(find dist/ -type f -name "*whl" -printf x | wc -c) == 1 ]] || exit 1 [[ $(find dist/ -type f -name "*tar.gz" -printf x | wc -c) == 1 ]] || exit 1 + - name: Check setuptools_scm version against git tag shell: bash run: | @@ -40,6 +46,7 @@ jobs: # ends in the most recent git tag, fail if it does not. TAG=$(git describe --tags) [[ "$(ls dist/*tar.gz)" =~ "-${TAG:1}.tar.gz" ]] + - name: Check we can install from wheel # note that this is how this works in bash (different shells might be # slightly different). we've checked there's only one .whl file in an @@ -50,10 +57,12 @@ jobs: shell: bash run: | pip install "$(ls dist/*whl)[dev]" + - name: Run some tests # modify the following as necessary to e.g., run notebooks run: | pytest tests/ + - uses: actions/upload-artifact@v4 with: path: dist/* diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index d2ea86667..c5ff7e986 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -22,6 +22,10 @@ jobs: # https://stackoverflow.com/a/68959339 with: fetch-depth: 0 + tags: true + # Ensure all tags are present + - run: git fetch --tags + - uses: actions/setup-python@v5 - name: Install dependencies run: | From aa9a4dd583d087fed50b530bba1beed2d7f0841f Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 15:23:39 -0400 Subject: [PATCH 178/244] Update doc --- doc/user_guide/06_tuning_curves.md | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md index 5177efe4f..bee472c10 100644 --- a/doc/user_guide/06_tuning_curves.md +++ b/doc/user_guide/06_tuning_curves.md @@ -152,6 +152,40 @@ plt.legend() plt.show() ``` +It is also possible to just get the spike counts per bins. This can be done by setting the argument `return_counts=True`. 
+The output is also a `xarray.DataArray` with the same dimensions as the tuning curves. + +```{code-cell} ipython3 +spike_counts = nap.compute_tuning_curves( + data=tsgroup, + features=feature, + bins=30, + range=(0, 2*np.pi), + feature_names=["feature"], + return_counts=True + ) +``` + +```{code-cell} ipython3 +:tags: [hide-input] +plt.figure() +plt.subplot(131) +plt.plot(tsgroup[3].value_from(feature), 'o') +plt.plot(feature, label="feature") +plt.ylabel("Feature") +plt.xlim(0, 2) +plt.xlabel("Time (s)") +plt.subplot(132) +plt.plot(tuning_curves_1d[3].values, tuning_curves_1d.coords["feature"]) +plt.xlabel("Firing rate (Hz)") +plt.subplot(133) +plt.barh(spike_counts.coords["feature"], width=spike_counts[3].values, height=np.mean(np.diff(spike_counts.coords["feature"]))) +plt.xlabel("Spike count") +plt.tight_layout() +plt.show() +``` + + ### 2D tuning curves from spikes ```{code-cell} ipython3 From 45cd60e020d0a6b25c02166d98fa1b41eec8c6c1 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 16:55:54 -0400 Subject: [PATCH 179/244] Fixing --- pynapple/core/time_series.py | 36 ++-------- pynapple/core/utils.py | 106 ++++++++++++++++++++++++++++++ tests/test_numpy_compatibility.py | 25 +++++-- 3 files changed, 130 insertions(+), 37 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index cde59f3c0..4856de619 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -47,6 +47,7 @@ add_docstring, convert_to_array, is_array_like, + modifies_time_axis, ) @@ -305,37 +306,10 @@ def __array_function__(self, func, types, args, kwargs): out = func._implementation(*new_args, **kwargs) - # Special case for array with symmetrical shapes and some functions like sum - if self.ndim > 1 and np.all(self.shape[0] == np.array(self.shape)): - # Need to check axis - sig = inspect.signature(func) - bound = sig.bind_partial(*new_args, **kwargs) - if out.ndim < self.ndim: # Case it reduces dimensions - axis = 
bound.arguments.get("axis", None) - # If axis = 0, the time axis disappear so should return a numpy array - if axis == 0: - return out - else: # ndim is the same - if func in [np.flip, np.flipud, np.rollaxis]: - axis = bound.arguments.get("axis", None) - if axis is None or axis == 0: - return out - if func is np.transpose: - axes = bound.arguments.get("axes", None) - if axes is None or (isinstance(axes, tuple) and axes[0] != 0): - return out - if func is np.moveaxis: - source = bound.arguments.get("source", None) - destination = bound.arguments.get("destination", None) - if source == 0 or destination == 0: - return out - if func is np.swapaxes: - axis1 = bound.arguments.get("axis1", None) - axis2 = bound.arguments.get("axis2", None) - if axis1 == 0 or axis2 == 0: - return out - - return _initialize_tsd_output(self, out) + if modifies_time_axis(func, new_args, kwargs): + return out + else: + return _initialize_tsd_output(self, out) def as_array(self): """ diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index 9b24315b7..4cfee887c 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -2,8 +2,10 @@ Utility functions """ +import inspect import os import warnings +from collections.abc import Sequence from itertools import combinations from numbers import Number from pathlib import Path @@ -484,3 +486,107 @@ def wrapper(func): return func return wrapper + + +def _arg_as_sequence(x): + return isinstance(x, Sequence) and not isinstance(x, (str, bytes)) + + +def modifies_time_axis(func, new_args, kwargs): + """ + Return True if calling func(*new_args, **kwargs) would modify/move axis 0. + Uses inspect.signature(bind_partial + apply_defaults) to get effective args. + Conservative: if we can't determine array ndim, assume it *may* modify axis 0. 
+ """ + if func is np.flipud: + return True + + sig = inspect.signature(func) + bound = sig.bind_partial(*new_args, **kwargs) + bound.apply_defaults() + + # Helper to get first array-like from positional args (conservative) + arr = None + if new_args: + arr = new_args[0] + else: + # try common kw names + for name in ("a", "arr", "array", "x", "m"): + if name in bound.arguments: + arr = bound.arguments[name] + break + + ndim = getattr(arr, "ndim", None) + + ### 1) single-axis arguments ### + axis = bound.arguments.get("axis", inspect._empty) + if axis is not inspect._empty: + # axis=None usually means "all axes" for reductions => affects axis 0 + if axis is None: + return True + # axis might be negative; normalize if ndim known + if ndim is not None: + try: + norm = axis if axis >= 0 else axis + ndim + except Exception: + norm = axis + if norm == 0: + return True + else: + # unknown ndim: if axis == 0 or axis is None -> assume it affects axis 0 + if axis == 0: + return True + # Special case for np.rollaxis + if func is np.rollaxis: + start = bound.arguments.get("start", 0) + if start == 0: + return True + if func is np.rot90: + axes = bound.arguments.get("axes", (0, 1)) + if 0 in axes: + return True + + ### 2) multi-axis permutation (e.g., transpose) ### + axes = bound.arguments.get("axes", inspect._empty) + if axes is not inspect._empty: + if axes is None: + # axes=None => reverse axes order; if ndim > 1 then axis 0 moves to last position + if ndim is None: + return True # conservative + return ndim > 1 and 0 != (ndim - 1) + if _arg_as_sequence(axes): + # if axis 0 is not at position 0 after permutation, it's moved + try: + idx = list(axes).index(0) + except ValueError: + # axis 0 not present? 
that's odd, but assume modified + return True + # idx is new position of original axis 0 + if idx != 0: + return True + + ### 3) moveaxis: source/destination can be ints or sequences ### + for name in ("source", "destination"): + val = bound.arguments.get(name, inspect._empty) + if val is not inspect._empty: + if val is None: + continue + if _arg_as_sequence(val): + if 0 in val: + return True + else: + if val == 0: + return True + + ### 4) swapaxes / similar ### + axis1 = bound.arguments.get("axis1", inspect._empty) + axis2 = bound.arguments.get("axis2", inspect._empty) + if axis1 is not inspect._empty: + if axis1 == 0: + return True + if axis2 is not inspect._empty: + if axis2 == 0: + return True + + # If none of the checks triggered, assume axis 0 is not modified. + return False diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 17f3ad010..61cf9f2d4 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -522,6 +522,7 @@ def test_fft(self, tsd): ("sum", {"axis": 1}), ("sum", {"axis": -1}), ("sum", {"axis": (0, 1)}), + ("sum", {"axis": None}), ], ) def test_square_arrays(tsd, func, kwargs): @@ -549,13 +550,15 @@ def test_square_arrays(tsd, func, kwargs): @pytest.mark.parametrize( "tsd", [ + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10), time_units="s"), ], ) @pytest.mark.parametrize( "func, kwargs, expected_type", [ - ("transpose", {}, np.ndarray), + ("transpose", {}, (nap.Tsd, np.ndarray)), ("transpose", {"axes": (2, 0, 1)}, np.ndarray), ("transpose", {"axes": (0, 2, 1)}, nap.TsdTensor), ("moveaxis", {"source": 0, "destination": 1}, np.ndarray), @@ -565,18 +568,28 @@ def test_square_arrays(tsd, func, kwargs): ("swapaxes", {"axis1": 1, "axis2": 2}, nap.TsdTensor), ("swapaxes", {"axis1": 2, "axis2": 0}, np.ndarray), ("rollaxis", {"axis": 0, "start": 1}, np.ndarray), - 
("rollaxis", {"axis": 1, "start": 0}, nap.TsdTensor), - ("rollaxis", {"axis": 2, "start": 0}, nap.TsdTensor), + ("rollaxis", {"axis": 1, "start": 0}, np.ndarray), + ("rollaxis", {"axis": 1, "start": 2}, (nap.TsdTensor, nap.TsdFrame)), ("flipud", {}, np.ndarray), - ("fliplr", {}, nap.TsdTensor), + ("fliplr", {}, (nap.TsdFrame, nap.TsdTensor)), ("flip", {"axis": 0}, np.ndarray), - ("flip", {"axis": 1}, nap.TsdTensor), + ("flip", {"axis": None}, np.ndarray), + ("flip", {"axis": 1}, (nap.TsdFrame, nap.TsdTensor)), ("flip", {"axis": 2}, nap.TsdTensor), + ("rot90", {}, np.ndarray), + ("rot90", {"k": 2}, np.ndarray), + ("roll", {"shift": 2, "axis": 0}, np.ndarray), + ("roll", {"shift": -2, "axis": 1}, (nap.TsdFrame, nap.TsdTensor)), + ("roll", {"shift": 1, "axis": 2}, nap.TsdTensor), ], ) def test_axis_moving(tsd, func, kwargs, expected_type): + try: + b = getattr(np, func)(tsd.values, **kwargs) + except (ValueError, RuntimeError): + pytest.skip("Skipping invalid axis operation") + a = getattr(np, func)(tsd, **kwargs) - b = getattr(np, func)(tsd.values, **kwargs) assert isinstance(a, expected_type) From 0029a6d84660dbfac3269bcfda35d1fd81e7ad24 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 20 Oct 2025 16:58:06 -0400 Subject: [PATCH 180/244] fix linting --- pynapple/core/time_series.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 4856de619..b9fa9cc58 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -16,7 +16,6 @@ import abc import importlib -import inspect import warnings from numbers import Number From 7c95a23a068c11babb2b0a7668faa231fcd1a9f8 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Tue, 21 Oct 2025 13:00:25 -0400 Subject: [PATCH 181/244] UPDATE --- pynapple/core/time_series.py | 12 +++- pynapple/core/utils.py | 36 +++++++++--- tests/test_numpy_compatibility.py | 92 ++++++++++++++++++++++++++++++- 3 files changed, 129 insertions(+), 11 deletions(-) 
diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index b9fa9cc58..716b4d5bb 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -293,7 +293,7 @@ def __array_function__(self, func, types, args, kwargs): if func in [np.split, np.array_split, np.dsplit, np.hsplit, np.vsplit]: return _split_tsd(func, *args, **kwargs) - if func in [np.concatenate, np.vstack, np.hstack, np.dstack]: + if func in [np.concatenate, np.vstack, np.hstack, np.dstack, np.column_stack]: return _concatenate_tsd(func, *args, **kwargs) new_args = [] @@ -1012,6 +1012,8 @@ def __getitem__(self, key): key = tuple(k.values if isinstance(k, Tsd) else k for k in key) output = self.values.__getitem__(key) index = self.index.__getitem__(key[0]) + if index.ndim > 1: + index = np.squeeze(index) else: output = self.values.__getitem__(key) index = self.index.__getitem__(key) @@ -1775,7 +1777,9 @@ def __getitem__(self, key, *args, **kwargs): if isinstance(key, tuple): index = self.index.__getitem__(key[0]) - if len(key) == 2: + if index.ndim > 1: + index = np.squeeze(index) + if len(key) == 2 and key[1] is not None: columns = self.columns.__getitem__(key[1]) else: index = self.index.__getitem__(key) @@ -2791,6 +2795,8 @@ def __getitem__(self, key, *args, **kwargs): if isinstance(key, tuple): index = self.index.__getitem__(key[0]) + if index.ndim > 1: + index = np.squeeze(index) elif isinstance(key, Number): index = np.array([key]) else: @@ -3369,6 +3375,8 @@ def __repr__(self): def __getitem__(self, key): if isinstance(key, tuple): index = self.index.__getitem__(key[0]) + if index.ndim > 1: + index = np.squeeze(index) else: index = self.index.__getitem__(key) diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index 4cfee887c..ffb3ece54 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -312,9 +312,16 @@ def _concatenate_tsd(func, *args, **kwargs): support_equal = _check_time_equals([x.values for x in time_supports]) if time_equal and 
support_equal: - return nap_class( - t=time_indexes[0], d=output, time_support=time_supports[0] - ) + new_kwargs = {} + if len(columns): + new_kwargs = {"columns": np.hstack([c for c in columns])} + if len(new_kwargs["columns"]) != output.shape[1]: + new_kwargs = {} + return args[0][0]._define_instance( + time_index = time_indexes[0], + time_support = time_supports[0], + values = output, + **new_kwargs) # Dropping metadata in this case else: if not time_equal and not support_equal: msg = "Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!" @@ -500,8 +507,14 @@ def modifies_time_axis(func, new_args, kwargs): """ if func is np.flipud: return True + if func in [np.squeeze]: + return False # This one should be handled by _initialize_tsd_output + + try: + sig = inspect.signature(func) + except (TypeError, ValueError): + return True # conservative - sig = inspect.signature(func) bound = sig.bind_partial(*new_args, **kwargs) bound.apply_defaults() @@ -517,9 +530,18 @@ def modifies_time_axis(func, new_args, kwargs): break ndim = getattr(arr, "ndim", None) + if ndim is None: + return True # conservative ### 1) single-axis arguments ### axis = bound.arguments.get("axis", inspect._empty) + if func is np.flip: + if axis == 0: + return True + if func is np.roll: + shift = bound.arguments.get("shift", None) # This one should be always pass + if axis == 0 and shift != 0: + return True if axis is not inspect._empty: # axis=None usually means "all axes" for reductions => affects axis 0 if axis is None: @@ -527,10 +549,10 @@ def modifies_time_axis(func, new_args, kwargs): # axis might be negative; normalize if ndim known if ndim is not None: try: - norm = axis if axis >= 0 else axis + ndim + normalized_axis = axis if axis >= 0 else axis + ndim except Exception: - norm = axis - if norm == 0: + normalized_axis = axis + if normalized_axis == 0 and ndim > 1: # If og ndim is 1, axis 0 can't be moved return True else: # unknown ndim: if axis 
== 0 or axis is None -> assume it affects axis 0 diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 61cf9f2d4..40f9e3840 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -567,7 +567,11 @@ def test_square_arrays(tsd, func, kwargs): ("swapaxes", {"axis1": 0, "axis2": 1}, np.ndarray), ("swapaxes", {"axis1": 1, "axis2": 2}, nap.TsdTensor), ("swapaxes", {"axis1": 2, "axis2": 0}, np.ndarray), - ("rollaxis", {"axis": 0, "start": 1}, np.ndarray), + ("rollaxis", {"axis": 0, "start": 1}, { + "Tsd": nap.Tsd, + "TsdFrame": np.ndarray, + "TsdTensor": np.ndarray} + ), ("rollaxis", {"axis": 1, "start": 0}, np.ndarray), ("rollaxis", {"axis": 1, "start": 2}, (nap.TsdTensor, nap.TsdFrame)), ("flipud", {}, np.ndarray), @@ -591,7 +595,10 @@ def test_axis_moving(tsd, func, kwargs, expected_type): a = getattr(np, func)(tsd, **kwargs) - assert isinstance(a, expected_type) + if isinstance(expected_type, dict): + assert isinstance(a, expected_type[tsd.nap_class]) + else: + assert isinstance(a, expected_type) if not isinstance(a, np.ndarray): np.testing.assert_array_almost_equal(a.index, tsd.index) @@ -600,3 +607,84 @@ def test_axis_moving(tsd, func, kwargs, expected_type): np.testing.assert_array_almost_equal(a.values, b) else: np.testing.assert_array_almost_equal(a, b) + + +@pytest.mark.parametrize( + "tsd", + [ + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), + ], +) +@pytest.mark.parametrize( + "func, kwargs, expected_type", + [ + ("expand_dims", {"axis": 0}, np.ndarray), + ("expand_dims", {"axis": 1}, (nap.TsdFrame, nap.TsdTensor)), + ("expand_dims", {"axis": -1}, (nap.TsdFrame, nap.TsdTensor)), + ("squeeze", {}, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)), + ("ravel", {}, {"Tsd":nap.Tsd, + "TsdFrame":np.ndarray, + 
"TsdTensor":np.ndarray + }), + ("ravel", {"order": "F"}, {"Tsd":nap.Tsd, + "TsdFrame":np.ndarray, + "TsdTensor":np.ndarray + }), + ("tile", {"reps": 2}, {"Tsd":np.ndarray, + "TsdFrame":nap.TsdFrame, + "TsdTensor":nap.TsdTensor + }), + ("tile", {"reps": (2, 1)}, {"Tsd":np.ndarray, + "TsdFrame":np.ndarray, + "TsdTensor":nap.TsdTensor + }), + ("tile", {"reps": (1, 2)}, {"Tsd":np.ndarray, + "TsdFrame":nap.TsdFrame, + "TsdTensor":nap.TsdTensor + }), + ], +) +def test_shape_change(tsd, func, kwargs, expected_type): + try: + b = getattr(np, func)(tsd.values, **kwargs) + except (ValueError, RuntimeError): + pytest.skip("Skipping invalid axis operation") + + a = getattr(np, func)(tsd, **kwargs) + + if isinstance(expected_type, dict): + assert isinstance(a, expected_type[tsd.nap_class]) + else: + assert isinstance(a, expected_type) + + if not isinstance(a, np.ndarray): + np.testing.assert_array_almost_equal(a.index, tsd.index) + + if hasattr(a, "values"): + np.testing.assert_array_almost_equal(a.values, b) + else: + np.testing.assert_array_almost_equal(a, b) + + +@pytest.mark.parametrize( + "tsd, slicing, expected_type", + [ + (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[None, :], np.ndarray), + (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[:, None], nap.TsdFrame), + (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, None], nap.TsdTensor), + (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, :, None], nap.TsdTensor), + (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), lambda x: x[:, None], nap.TsdTensor), + (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), lambda x: x[None, :], np.ndarray), + ], +) +def test_shape_change_2(tsd, slicing, expected_type): + a = slicing(tsd) + assert isinstance(a, expected_type) + if hasattr(a, "index"): + np.testing.assert_array_almost_equal(a.index, tsd.index) + if hasattr(a, "values"): + np.testing.assert_array_almost_equal(a.values, 
slicing(tsd.values)) + From 369c6a2427330f79680a64bc812578968d9b7bbd Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Tue, 21 Oct 2025 14:13:48 -0400 Subject: [PATCH 182/244] Update --- pynapple/core/utils.py | 17 +++--- tests/test_numpy_compatibility.py | 90 ++++++++++++++++++++----------- 2 files changed, 69 insertions(+), 38 deletions(-) diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index ffb3ece54..4a024a304 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -318,10 +318,11 @@ def _concatenate_tsd(func, *args, **kwargs): if len(new_kwargs["columns"]) != output.shape[1]: new_kwargs = {} return args[0][0]._define_instance( - time_index = time_indexes[0], - time_support = time_supports[0], - values = output, - **new_kwargs) # Dropping metadata in this case + time_index=time_indexes[0], + time_support=time_supports[0], + values=output, + **new_kwargs, + ) # Dropping metadata in this case else: if not time_equal and not support_equal: msg = "Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!" 
@@ -508,7 +509,7 @@ def modifies_time_axis(func, new_args, kwargs): if func is np.flipud: return True if func in [np.squeeze]: - return False # This one should be handled by _initialize_tsd_output + return False # This one should be handled by _initialize_tsd_output try: sig = inspect.signature(func) @@ -539,7 +540,7 @@ def modifies_time_axis(func, new_args, kwargs): if axis == 0: return True if func is np.roll: - shift = bound.arguments.get("shift", None) # This one should be always pass + shift = bound.arguments.get("shift", None) # This one should be always pass if axis == 0 and shift != 0: return True if axis is not inspect._empty: @@ -552,7 +553,9 @@ def modifies_time_axis(func, new_args, kwargs): normalized_axis = axis if axis >= 0 else axis + ndim except Exception: normalized_axis = axis - if normalized_axis == 0 and ndim > 1: # If og ndim is 1, axis 0 can't be moved + if ( + normalized_axis == 0 and ndim > 1 + ): # If og ndim is 1, axis 0 can't be moved return True else: # unknown ndim: if axis == 0 or axis is None -> assume it affects axis 0 diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 40f9e3840..5e185e50f 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -567,10 +567,10 @@ def test_square_arrays(tsd, func, kwargs): ("swapaxes", {"axis1": 0, "axis2": 1}, np.ndarray), ("swapaxes", {"axis1": 1, "axis2": 2}, nap.TsdTensor), ("swapaxes", {"axis1": 2, "axis2": 0}, np.ndarray), - ("rollaxis", {"axis": 0, "start": 1}, { - "Tsd": nap.Tsd, - "TsdFrame": np.ndarray, - "TsdTensor": np.ndarray} + ( + "rollaxis", + {"axis": 0, "start": 1}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, ), ("rollaxis", {"axis": 1, "start": 0}, np.ndarray), ("rollaxis", {"axis": 1, "start": 2}, (nap.TsdTensor, nap.TsdFrame)), @@ -625,26 +625,31 @@ def test_axis_moving(tsd, func, kwargs, expected_type): ("expand_dims", {"axis": 1}, (nap.TsdFrame, nap.TsdTensor)), ("expand_dims", 
{"axis": -1}, (nap.TsdFrame, nap.TsdTensor)), ("squeeze", {}, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)), - ("ravel", {}, {"Tsd":nap.Tsd, - "TsdFrame":np.ndarray, - "TsdTensor":np.ndarray - }), - ("ravel", {"order": "F"}, {"Tsd":nap.Tsd, - "TsdFrame":np.ndarray, - "TsdTensor":np.ndarray - }), - ("tile", {"reps": 2}, {"Tsd":np.ndarray, - "TsdFrame":nap.TsdFrame, - "TsdTensor":nap.TsdTensor - }), - ("tile", {"reps": (2, 1)}, {"Tsd":np.ndarray, - "TsdFrame":np.ndarray, - "TsdTensor":nap.TsdTensor - }), - ("tile", {"reps": (1, 2)}, {"Tsd":np.ndarray, - "TsdFrame":nap.TsdFrame, - "TsdTensor":nap.TsdTensor - }), + ( + "ravel", + {}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, + ), + ( + "ravel", + {"order": "F"}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, + ), + ( + "tile", + {"reps": 2}, + {"Tsd": np.ndarray, "TsdFrame": nap.TsdFrame, "TsdTensor": nap.TsdTensor}, + ), + ( + "tile", + {"reps": (2, 1)}, + {"Tsd": np.ndarray, "TsdFrame": np.ndarray, "TsdTensor": nap.TsdTensor}, + ), + ( + "tile", + {"reps": (1, 2)}, + {"Tsd": np.ndarray, "TsdFrame": nap.TsdFrame, "TsdTensor": nap.TsdTensor}, + ), ], ) def test_shape_change(tsd, func, kwargs, expected_type): @@ -672,12 +677,36 @@ def test_shape_change(tsd, func, kwargs, expected_type): @pytest.mark.parametrize( "tsd, slicing, expected_type", [ - (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[None, :], np.ndarray), - (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[:, None], nap.TsdFrame), - (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, None], nap.TsdTensor), - (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, :, None], nap.TsdTensor), - (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), lambda x: x[:, None], nap.TsdTensor), - (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), lambda x: x[None, :], np.ndarray), + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + lambda x: 
x[None, :], + np.ndarray, + ), + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + lambda x: x[:, None], + nap.TsdFrame, + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), + lambda x: x[:, None], + nap.TsdTensor, + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), + lambda x: x[:, :, None], + nap.TsdTensor, + ), + ( + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), + lambda x: x[:, None], + nap.TsdTensor, + ), + ( + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), + lambda x: x[None, :], + np.ndarray, + ), ], ) def test_shape_change_2(tsd, slicing, expected_type): @@ -687,4 +716,3 @@ def test_shape_change_2(tsd, slicing, expected_type): np.testing.assert_array_almost_equal(a.index, tsd.index) if hasattr(a, "values"): np.testing.assert_array_almost_equal(a.values, slicing(tsd.values)) - From c65f5308a53f35e6dd6644b9a05a88e0d2a5457c Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Tue, 21 Oct 2025 17:08:18 -0400 Subject: [PATCH 183/244] Update --- pynapple/core/time_series.py | 9 +- pynapple/core/utils.py | 17 ++- tests/test_numpy_compatibility.py | 212 +++++++++++++++++++++++++----- 3 files changed, 199 insertions(+), 39 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index 716b4d5bb..dbc167a14 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -293,7 +293,14 @@ def __array_function__(self, func, types, args, kwargs): if func in [np.split, np.array_split, np.dsplit, np.hsplit, np.vsplit]: return _split_tsd(func, *args, **kwargs) - if func in [np.concatenate, np.vstack, np.hstack, np.dstack, np.column_stack]: + if func in [ + np.concatenate, + np.vstack, + np.hstack, + np.dstack, + np.column_stack, + np.stack, + ]: return _concatenate_tsd(func, *args, **kwargs) new_args = [] diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index ffb3ece54..4a024a304 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -318,10 
+318,11 @@ def _concatenate_tsd(func, *args, **kwargs): if len(new_kwargs["columns"]) != output.shape[1]: new_kwargs = {} return args[0][0]._define_instance( - time_index = time_indexes[0], - time_support = time_supports[0], - values = output, - **new_kwargs) # Dropping metadata in this case + time_index=time_indexes[0], + time_support=time_supports[0], + values=output, + **new_kwargs, + ) # Dropping metadata in this case else: if not time_equal and not support_equal: msg = "Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!" @@ -508,7 +509,7 @@ def modifies_time_axis(func, new_args, kwargs): if func is np.flipud: return True if func in [np.squeeze]: - return False # This one should be handled by _initialize_tsd_output + return False # This one should be handled by _initialize_tsd_output try: sig = inspect.signature(func) @@ -539,7 +540,7 @@ def modifies_time_axis(func, new_args, kwargs): if axis == 0: return True if func is np.roll: - shift = bound.arguments.get("shift", None) # This one should be always pass + shift = bound.arguments.get("shift", None) # This one should be always pass if axis == 0 and shift != 0: return True if axis is not inspect._empty: @@ -552,7 +553,9 @@ def modifies_time_axis(func, new_args, kwargs): normalized_axis = axis if axis >= 0 else axis + ndim except Exception: normalized_axis = axis - if normalized_axis == 0 and ndim > 1: # If og ndim is 1, axis 0 can't be moved + if ( + normalized_axis == 0 and ndim > 1 + ): # If og ndim is 1, axis 0 can't be moved return True else: # unknown ndim: if axis == 0 or axis is None -> assume it affects axis 0 diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 40f9e3840..5fb553e35 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -1,3 +1,4 @@ +import warnings from numbers import Number import numpy as np @@ -504,6 +505,127 @@ def test_fft(self, tsd): np.fft.fft(tsd) 
+@pytest.mark.parametrize( + "func, kwargs", + [ + ("concatenate", {}), + ("concatenate", {"axis": 0}), + ("concatenate", {"axis": 1}), + ("concatenate", {"axis": 2}), + ("stack", {}), + ("stack", {"axis": 0}), + ("stack", {"axis": 1}), + ("stack", {"axis": 2}), + ("stack", {"axis": -1}), + ("vstack", {}), + ("hstack", {}), + ("dstack", {}), + ("column_stack", {}), + ], +) +@pytest.mark.parametrize( + "tsds", + [ + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + nap.Tsd(t=np.arange(10) + 15, d=np.random.rand(10)), + ), + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + ), + ( + nap.Tsd(t=np.arange(10) + 15, d=np.random.rand(10)), + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 5)), + nap.TsdFrame(t=np.arange(10) + 15, d=np.random.rand(10, 5)), + ), + ( + nap.TsdFrame(t=np.arange(10) + 15, d=np.random.rand(10, 5)), + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 5)), + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 5)), + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 5)), + ), + ( + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 5, 2)), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 5, 2)), + ), + ( + nap.TsdTensor(t=np.arange(10) + 15, d=np.random.rand(10, 5, 2)), + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 5, 2)), + ), + ], +) +def test_concatenate_all(func, kwargs, tsds): + tsd1, tsd2 = tsds + try: + b = getattr(np, func)((tsd1.values, tsd2.values), **kwargs) + except (ValueError, RuntimeError): + pytest.skip("Skipping invalid axis operation") + + try: + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + a = getattr(np, func)((tsd1, tsd2), **kwargs) + except (ValueError, RuntimeError) as e: + error_msg = str(e) + assert ( + error_msg + == "The order of the time series indexes should be strictly increasing and non overlapping." 
+ ) + return + + if a.ndim == tsd1.ndim: + if a.shape[0] == tsd1.shape[0] + tsd2.shape[0]: # Stacking vertically + assert isinstance(a, tsd1.__class__) + np.testing.assert_array_almost_equal( + a.index, np.concatenate((tsd1.index, tsd2.index)) + ) + np.testing.assert_array_almost_equal(a.values, b) + np.testing.assert_array_equal( + np.vstack((tsd1.time_support.values, tsd2.time_support.values)), + a.time_support.values, + ) + else: + # Check if operation was allowed + if isinstance(a, tsd1.__class__): + np.testing.assert_array_almost_equal(tsd1.index, tsd2.index) + np.testing.assert_array_almost_equal(a.values, b) + np.testing.assert_array_equal(a.index, tsd1.index) + if hasattr(tsd1, "columns") and hasattr(tsd2, "columns"): + np.testing.assert_array_equal( + a.columns, np.concatenate((tsd1.columns, tsd2.columns), axis=0) + ) + else: + assert isinstance(a, np.ndarray) + np.testing.assert_array_almost_equal(a, b) + else: + # Check if operation was allowed + if hasattr(a, "nap_class"): + np.testing.assert_array_almost_equal(tsd1.index, tsd2.index) + np.testing.assert_array_almost_equal(a.values, b) + np.testing.assert_array_equal(a.index, tsd1.index) + np.testing.assert_array_equal( + tsd1.time_support.values, tsd2.time_support.values + ) + else: + assert isinstance(a, np.ndarray) + np.testing.assert_array_almost_equal(a, b) + + if len(record) > 0: + warning_msg = str(record[0].message) + assert warning_msg in [ + "Time indexes and time supports are not all equals up to pynapple precision. Returning numpy array!", + "Time indexes are not all equals up to pynapple precision. Returning numpy array!", + "Time supports are not all equals up to pynapple precision. 
Returning numpy array!", + ] + assert isinstance(a, np.ndarray) + + @pytest.mark.parametrize( "tsd", [ @@ -567,10 +689,10 @@ def test_square_arrays(tsd, func, kwargs): ("swapaxes", {"axis1": 0, "axis2": 1}, np.ndarray), ("swapaxes", {"axis1": 1, "axis2": 2}, nap.TsdTensor), ("swapaxes", {"axis1": 2, "axis2": 0}, np.ndarray), - ("rollaxis", {"axis": 0, "start": 1}, { - "Tsd": nap.Tsd, - "TsdFrame": np.ndarray, - "TsdTensor": np.ndarray} + ( + "rollaxis", + {"axis": 0, "start": 1}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, ), ("rollaxis", {"axis": 1, "start": 0}, np.ndarray), ("rollaxis", {"axis": 1, "start": 2}, (nap.TsdTensor, nap.TsdFrame)), @@ -625,26 +747,31 @@ def test_axis_moving(tsd, func, kwargs, expected_type): ("expand_dims", {"axis": 1}, (nap.TsdFrame, nap.TsdTensor)), ("expand_dims", {"axis": -1}, (nap.TsdFrame, nap.TsdTensor)), ("squeeze", {}, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)), - ("ravel", {}, {"Tsd":nap.Tsd, - "TsdFrame":np.ndarray, - "TsdTensor":np.ndarray - }), - ("ravel", {"order": "F"}, {"Tsd":nap.Tsd, - "TsdFrame":np.ndarray, - "TsdTensor":np.ndarray - }), - ("tile", {"reps": 2}, {"Tsd":np.ndarray, - "TsdFrame":nap.TsdFrame, - "TsdTensor":nap.TsdTensor - }), - ("tile", {"reps": (2, 1)}, {"Tsd":np.ndarray, - "TsdFrame":np.ndarray, - "TsdTensor":nap.TsdTensor - }), - ("tile", {"reps": (1, 2)}, {"Tsd":np.ndarray, - "TsdFrame":nap.TsdFrame, - "TsdTensor":nap.TsdTensor - }), + ( + "ravel", + {}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, + ), + ( + "ravel", + {"order": "F"}, + {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, + ), + ( + "tile", + {"reps": 2}, + {"Tsd": np.ndarray, "TsdFrame": nap.TsdFrame, "TsdTensor": nap.TsdTensor}, + ), + ( + "tile", + {"reps": (2, 1)}, + {"Tsd": np.ndarray, "TsdFrame": np.ndarray, "TsdTensor": nap.TsdTensor}, + ), + ( + "tile", + {"reps": (1, 2)}, + {"Tsd": np.ndarray, "TsdFrame": nap.TsdFrame, "TsdTensor": nap.TsdTensor}, + ), ], ) def 
test_shape_change(tsd, func, kwargs, expected_type): @@ -672,12 +799,36 @@ def test_shape_change(tsd, func, kwargs, expected_type): @pytest.mark.parametrize( "tsd, slicing, expected_type", [ - (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[None, :], np.ndarray), - (nap.Tsd(t=np.arange(10), d=np.random.rand(10)), lambda x: x[:, None], nap.TsdFrame), - (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, None], nap.TsdTensor), - (nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), lambda x: x[:, :, None], nap.TsdTensor), - (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), lambda x: x[:, None], nap.TsdTensor), - (nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), lambda x: x[None, :], np.ndarray), + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + lambda x: x[None, :], + np.ndarray, + ), + ( + nap.Tsd(t=np.arange(10), d=np.random.rand(10)), + lambda x: x[:, None], + nap.TsdFrame, + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), + lambda x: x[:, None], + nap.TsdTensor, + ), + ( + nap.TsdFrame(t=np.arange(10), d=np.random.rand(10, 10)), + lambda x: x[:, :, None], + nap.TsdTensor, + ), + ( + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 10)), + lambda x: x[:, None], + nap.TsdTensor, + ), + ( + nap.TsdTensor(t=np.arange(10), d=np.random.rand(10, 10, 1)), + lambda x: x[None, :], + np.ndarray, + ), ], ) def test_shape_change_2(tsd, slicing, expected_type): @@ -687,4 +838,3 @@ def test_shape_change_2(tsd, slicing, expected_type): np.testing.assert_array_almost_equal(a.index, tsd.index) if hasattr(a, "values"): np.testing.assert_array_almost_equal(a.values, slicing(tsd.values)) - From 3887bdeb73762c1679bfd3fc60683ba2679fed17 Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Tue, 21 Oct 2025 18:06:24 -0400 Subject: [PATCH 184/244] Update --- pynapple/core/time_series.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pynapple/core/time_series.py 
b/pynapple/core/time_series.py index dbc167a14..dc5e4c60c 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -287,6 +287,17 @@ def __array_function__(self, func, types, args, kwargs): ]: return NotImplemented + # This should be implemented at some point + if func in [ + np.take, + np.take_along_axis, + np.extract, + np.compress, + np.choose, + np.select, + ]: + return NotImplemented + if hasattr(np.fft, func.__name__): return NotImplemented From 6f422ed3181598da83c475bade6ae108b84c4622 Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Wed, 22 Oct 2025 11:23:45 -0400 Subject: [PATCH 185/244] Update utils.py --- pynapple/core/utils.py | 58 +++++++++++++----------------------------- 1 file changed, 17 insertions(+), 41 deletions(-) diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index 4a024a304..1cc6ba11e 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -536,31 +536,18 @@ def modifies_time_axis(func, new_args, kwargs): ### 1) single-axis arguments ### axis = bound.arguments.get("axis", inspect._empty) - if func is np.flip: - if axis == 0: - return True - if func is np.roll: - shift = bound.arguments.get("shift", None) # This one should be always pass - if axis == 0 and shift != 0: - return True if axis is not inspect._empty: # axis=None usually means "all axes" for reductions => affects axis 0 - if axis is None: + if (axis is None) or (axis == 0): + return True + if isinstance(axis, tuple) and 0 in axis: return True # axis might be negative; normalize if ndim known - if ndim is not None: - try: - normalized_axis = axis if axis >= 0 else axis + ndim - except Exception: - normalized_axis = axis - if ( - normalized_axis == 0 and ndim > 1 - ): # If og ndim is 1, axis 0 can't be moved - return True - else: - # unknown ndim: if axis == 0 or axis is None -> assume it affects axis 0 - if axis == 0: + if (axis < 0) and (ndim > 1): + normalized_axis = axis + ndim + if normalized_axis == 0: return True + # Special case 
for np.rollaxis if func is np.rollaxis: start = bound.arguments.get("start", 0) @@ -575,17 +562,10 @@ def modifies_time_axis(func, new_args, kwargs): axes = bound.arguments.get("axes", inspect._empty) if axes is not inspect._empty: if axes is None: - # axes=None => reverse axes order; if ndim > 1 then axis 0 moves to last position - if ndim is None: - return True # conservative - return ndim > 1 and 0 != (ndim - 1) + return ndim > 1 if _arg_as_sequence(axes): # if axis 0 is not at position 0 after permutation, it's moved - try: - idx = list(axes).index(0) - except ValueError: - # axis 0 not present? that's odd, but assume modified - return True + idx = list(axes).index(0) # idx is new position of original axis 0 if idx != 0: return True @@ -596,22 +576,18 @@ def modifies_time_axis(func, new_args, kwargs): if val is not inspect._empty: if val is None: continue - if _arg_as_sequence(val): - if 0 in val: - return True - else: - if val == 0: - return True + elif (_arg_as_sequence(val)) and (0 in val): + return True + elif val == 0: + return True ### 4) swapaxes / similar ### axis1 = bound.arguments.get("axis1", inspect._empty) axis2 = bound.arguments.get("axis2", inspect._empty) - if axis1 is not inspect._empty: - if axis1 == 0: - return True - if axis2 is not inspect._empty: - if axis2 == 0: - return True + if (axis1 is not inspect._empty) and (axis1 == 0): + return True + if (axis2 is not inspect._empty) and (axis2 == 0): + return True # If none of the checks triggered, assume axis 0 is not modified. 
return False From e957c007849ab2c20b0510ebd26ac15fd29f930f Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Wed, 22 Oct 2025 14:27:32 -0400 Subject: [PATCH 186/244] some cleanup and fixes --- pynapple/core/time_series.py | 1 + pynapple/core/utils.py | 25 +++++++++++++++---------- tests/test_numpy_compatibility.py | 9 +++++++-- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index dc5e4c60c..eb6e4551b 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -295,6 +295,7 @@ def __array_function__(self, func, types, args, kwargs): np.compress, np.choose, np.select, + np.delete, ]: return NotImplemented diff --git a/pynapple/core/utils.py b/pynapple/core/utils.py index 1cc6ba11e..a2a48230c 100644 --- a/pynapple/core/utils.py +++ b/pynapple/core/utils.py @@ -508,7 +508,7 @@ def modifies_time_axis(func, new_args, kwargs): """ if func is np.flipud: return True - if func in [np.squeeze]: + if func is np.squeeze: return False # This one should be handled by _initialize_tsd_output try: @@ -540,29 +540,34 @@ def modifies_time_axis(func, new_args, kwargs): # axis=None usually means "all axes" for reductions => affects axis 0 if (axis is None) or (axis == 0): return True - if isinstance(axis, tuple) and 0 in axis: + if isinstance(axis, tuple) and (0 in axis): return True # axis might be negative; normalize if ndim known - if (axis < 0) and (ndim > 1): + if axis < 0: normalized_axis = axis + ndim - if normalized_axis == 0: - return True + if func is np.expand_dims: + if normalized_axis == -1: + # normalized_axis will be -1 when expanding first dimension + # normalized_axis = 0 will expand in the second dimension + return True + else: + if normalized_axis == 0: + return True # Special case for np.rollaxis if func is np.rollaxis: - start = bound.arguments.get("start", 0) - if start == 0: + if bound.arguments.get("start", 0) == 0: return True + # special case for np.rot90 if func is 
np.rot90: - axes = bound.arguments.get("axes", (0, 1)) - if 0 in axes: + if 0 in bound.arguments.get("axes", (0, 1)): return True ### 2) multi-axis permutation (e.g., transpose) ### axes = bound.arguments.get("axes", inspect._empty) if axes is not inspect._empty: if axes is None: - return ndim > 1 + return True # all axes permuted => affects axis 0 if _arg_as_sequence(axes): # if axis 0 is not at position 0 after permutation, it's moved idx = list(axes).index(0) diff --git a/tests/test_numpy_compatibility.py b/tests/test_numpy_compatibility.py index 5fb553e35..0317ccff1 100644 --- a/tests/test_numpy_compatibility.py +++ b/tests/test_numpy_compatibility.py @@ -660,7 +660,7 @@ def test_square_arrays(tsd, func, kwargs): assert np.isscalar(a) assert a == b else: - if axis == 0: + if (axis == 0) or (isinstance(axis, tuple) and 0 in axis): assert isinstance(a, (np.ndarray, Number)) np.testing.assert_array_almost_equal(a, b) else: @@ -692,7 +692,7 @@ def test_square_arrays(tsd, func, kwargs): ( "rollaxis", {"axis": 0, "start": 1}, - {"Tsd": nap.Tsd, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, + {"Tsd": np.ndarray, "TsdFrame": np.ndarray, "TsdTensor": np.ndarray}, ), ("rollaxis", {"axis": 1, "start": 0}, np.ndarray), ("rollaxis", {"axis": 1, "start": 2}, (nap.TsdTensor, nap.TsdFrame)), @@ -746,6 +746,11 @@ def test_axis_moving(tsd, func, kwargs, expected_type): ("expand_dims", {"axis": 0}, np.ndarray), ("expand_dims", {"axis": 1}, (nap.TsdFrame, nap.TsdTensor)), ("expand_dims", {"axis": -1}, (nap.TsdFrame, nap.TsdTensor)), + ( + "expand_dims", + {"axis": -2}, + {"Tsd": np.ndarray, "TsdFrame": nap.TsdTensor, "TsdTensor": nap.TsdTensor}, + ), ("squeeze", {}, (nap.Tsd, nap.TsdFrame, nap.TsdTensor)), ( "ravel", From 3c433575bd231afa1c1533d04ac5fecdefe5d804 Mon Sep 17 00:00:00 2001 From: sjvenditto Date: Wed, 22 Oct 2025 14:36:54 -0400 Subject: [PATCH 187/244] drop metadata and column names --- pynapple/core/time_series.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 
3 deletions(-) diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py index eb6e4551b..56b4e21f2 100644 --- a/pynapple/core/time_series.py +++ b/pynapple/core/time_series.py @@ -72,7 +72,12 @@ def _get_class(data): def _initialize_tsd_output( - input_object, values, time_index=None, time_support=None, kwargs=None + input_object, + values, + time_index=None, + time_support=None, + drop_metadata=False, + kwargs=None, ): """ Initialize the output object for time series data, ensuring proper alignment of time indices @@ -128,7 +133,7 @@ def _initialize_tsd_output( cls = _get_class(values) # if out will be a tsdframe implement kwargs logic - if cls is TsdFrame: + if (cls is TsdFrame) and (not drop_metadata): # get eventual setting cols = kwargs.get("columns", None) metadata = kwargs.get("metadata", None) @@ -327,7 +332,7 @@ def __array_function__(self, func, types, args, kwargs): if modifies_time_axis(func, new_args, kwargs): return out else: - return _initialize_tsd_output(self, out) + return _initialize_tsd_output(self, out, drop_metadata=True) def as_array(self): """ From f08d7129ebdcb3c0d59238ebb75a68f57fc59f36 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 23 Oct 2025 16:08:02 +0000 Subject: [PATCH 188/244] docstrings --- doc/conf.py | 1 + pynapple/process/decoding.py | 71 ++++++++++++++++++------------- pynapple/process/tuning_curves.py | 50 ++++++++++++++-------- 3 files changed, 74 insertions(+), 48 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index bac03ca7e..1aea34c67 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -83,6 +83,7 @@ "python": ("https://docs.python.org/3/", None), "pandas": ("https://pandas.pydata.org/docs/", None), "xarray": ("https://docs.xarray.dev/en/stable/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/", None), } # apidoc_module_dir = '../pynapple' diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index f9b44a837..e024e012a 100644 --- a/pynapple/process/decoding.py +++ 
b/pynapple/process/decoding.py @@ -24,7 +24,7 @@ def wrapper(*args, **kwargs): tuning_curves = kwargs["tuning_curves"] if not isinstance(tuning_curves, xr.DataArray): raise TypeError( - "tuning_curves should be an xr.DataArray as computed by compute_tuning_curves." + "tuning_curves should be an xarray.DataArray as computed by compute_tuning_curves." ) # check data @@ -156,16 +156,17 @@ def decode_bayes( If ``uniform_prior=True``, it is a uniform distribution over feature values. If ``uniform_prior=False``, it is based on the occupancy (i.e. the time spent in each feature bin during tuning curve estimation). - See:\n - Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. - (1998). Interpreting neuronal population activity by - reconstruction: unified framework with application to - hippocampal place cells. Journal of neurophysiology, 79(2), - 1017-1044. + References + ---------- + .. [1] Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. + (1998). Interpreting neuronal population activity by + reconstruction: unified framework with application to + hippocampal place cells. Journal of neurophysiology, 79(2), + 1017-1044. Parameters ---------- - tuning_curves : xr.DataArray + tuning_curves : xarray.DataArray Tuning curves as computed by `compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. @@ -381,19 +382,19 @@ def decode_template( The algorithm computes the distance between the observed neural activity and the tuning curves for every time bin. The decoded feature at each time bin corresponds to the tuning curve bin with the smallest distance. - See:\n - Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. - (1998). Interpreting neuronal population activity by - reconstruction: unified framework with application to - hippocampal place cells. Journal of neurophysiology, 79(2), - 1017-1044. 
+ See :func:`scipy.spatial.distance.cdist` for available distance metrics and how they are computed. - See ``scipy.spatial.distance.cdist`` for available distance metrics and how they are computed: - https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html + References + ---------- + .. [1] Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. + (1998). Interpreting neuronal population activity by + reconstruction: unified framework with application to + hippocampal place cells. Journal of neurophysiology, 79(2), + 1017-1044. Parameters ---------- - tuning_curves : xr.DataArray + tuning_curves : xarray.DataArray Tuning curves as computed by `compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. @@ -401,20 +402,26 @@ def decode_template( epochs : IntervalSet The epochs on which decoding is computed bin_size : float - Bin size. Default is second. Use the parameter time_units to change it. + Bin size. Default is second. Use the parameter `time_units` to change it. metric : str or callable, optional The distance metric to use for template matching. - This is passed to `scipy.spatial.distance.cdist`. - If a string, the distance function can be ‘braycurtis’, ‘canberra’, - ‘chebyshev’, ‘cityblock’, ‘correlation’, ‘cosine’, ‘dice’, ‘euclidean’, - ‘hamming’, ‘jaccard’, ‘jensenshannon’, ‘kulczynski1’, ‘mahalanobis’, - ‘matching’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, - ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’. - Default is 'correlation'. 
+ + If a string, passed to :func:`scipy.spatial.distance.cdist`, must be one of: + ``braycurtis``, ``canberra``, ``chebyshev``, ``cityblock``, ``correlation``, + ``cosine``, ``dice``, ``euclidean``, ``hamming``, ``jaccard``, ``jensenshannon``, + ``kulczynski1``, ``mahalanobis``, ``matching``, ``minkowski``, ``rogerstanimoto``, + ``russellrao``, ``seuclidean``, ``sokalmichener``, ``sokalsneath``, + ``sqeuclidean`` or ``yule``. + + Default is ``correlation``. .. note:: - Some metrics may not be suitable for all types of data. - For example, if your tuning curves contain NaN values, you should not use 'hamming', as it does not handle NaNs. + Some metrics may not be suitable for all types of data. + For example, metrics such as ``hamming`` do not handle NaN values. + + If a callable, it must have the signature ``metric(u, v) -> float`` and + return the distance between two 1D arrays. + time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). @@ -422,7 +429,7 @@ def decode_template( ------- Tsd The decoded feature - TsdFrame, TsdTensor + TsdFrame or TsdTensor The distance matrix between the neural activity and the tuning curves for each time bin. Examples @@ -581,7 +588,9 @@ def decode_template( def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): """ - Deprecated, use `decode` instead. + .. deprecated:: 0.9.2 + `decode_1d` will be removed in Pynapple 0.10.0, it is replaced by + `decode_bayes` because the latter works for N dimensions. """ warnings.warn( "decode_1d is deprecated and will be removed in a future version; use decode_bayes instead.", @@ -619,7 +628,9 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): """ - Deprecated, use `decode` instead. + .. deprecated:: 0.9.2 + `decode_2d` will be removed in Pynapple 0.10.0, it is replaced by + `decode_bayes` because the latter works for N dimensions. 
""" warnings.warn( "decode_2d is deprecated and will be removed in a future version; use decode_bayes instead.", diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index f93979100..c3da71ee3 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -312,6 +312,11 @@ def compute_response_per_epoch(data, epochs_dict, return_pandas=False): return_pandas : bool, optional If True, the function returns a pandas.DataFrame instead of an xarray.DataArray. + Returns + ------- + xarray.DataArray + A tensor containing the tuning curves with labeled epochs. + Examples -------- This function is typically used for a set of discrete stimuli being presented for multiple epochs. @@ -348,11 +353,6 @@ def compute_response_per_epoch(data, epochs_dict, return_pandas=False): Coordinates: * unit (unit) int64 24B 0 1 2 * epochs (epochs) Date: Thu, 23 Oct 2025 16:50:34 +0000 Subject: [PATCH 189/244] add bin_size test --- pynapple/process/decoding.py | 22 +++++++++++++++++----- pynapple/process/tuning_curves.py | 2 +- tests/test_decoding.py | 16 ++++++++++++---- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index e024e012a..995ddc1ae 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -39,7 +39,7 @@ def wrapper(*args, **kwargs): raise TypeError("Unknown format for data.") kwargs["data"] = data - # check match + # check match tuning curves and data if tuning_curves.sizes["unit"] != data.shape[1]: raise ValueError("Different shapes for tuning_curves and data.") if not np.all(tuning_curves.coords["unit"] == data.columns.values): @@ -54,6 +54,18 @@ def wrapper(*args, **kwargs): "uniform_prior set to False but no occupancy found in tuning curves." 
) + # check match data and bin_size + if isinstance(data, (nap.Tsd, nap.TsdFrame)): + actual_bin_size = np.mean(data.time_diff().values) + # actual_bin_size = nap.TsIndex.format_timestamps( + # np.array([np.mean(data.time_diff().values)], dtype=np.float64), + # time_units, + # )[0] + if not np.isclose(actual_bin_size, kwargs["bin_size"]): + raise ValueError( + "passed bin_size too different from actual data bin size." + ) + # Call the original function with validated inputs return func(**kwargs) @@ -167,14 +179,14 @@ def decode_bayes( Parameters ---------- tuning_curves : xarray.DataArray - Tuning curves as computed by `compute_tuning_curves`. + Tuning curves as computed by :func:`~pynapple.process.tuning_curves.compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed counts. epochs : IntervalSet The epochs on which decoding is computed bin_size : float - Bin size. Default is second. Use the parameter time_units to change it. + Bin size. Default is second. Use ``time_units`` to change it. time_units : str, optional Time unit of the bin size ('s' [default], 'ms', 'us'). uniform_prior : bool, optional @@ -395,14 +407,14 @@ def decode_template( Parameters ---------- tuning_curves : xarray.DataArray - Tuning curves as computed by `compute_tuning_curves`. + Tuning curves as computed by :func:`~pynapple.process.tuning_curves.compute_tuning_curves`. data : TsGroup or TsdFrame Neural activity with the same keys as the tuning curves. You may also pass a TsdFrame with smoothed counts. epochs : IntervalSet The epochs on which decoding is computed bin_size : float - Bin size. Default is second. Use the parameter `time_units` to change it. + Bin size. Default is second. Use ``time_units`` to change it. metric : str or callable, optional The distance metric to use for template matching. 
diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index c3da71ee3..7d50bd1c6 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -452,7 +452,7 @@ def compute_mutual_information(tuning_curves): Parameters ---------- tuning_curves : xarray.DataArray - As computed by `compute_tuning_curves`. + Tuning curves as computed by :func:`compute_tuning_curves`. Returns ------- diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 884a15053..74c6ec6da 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -42,7 +42,7 @@ def get_testing_set_n(n_features=1, binned=False): "tuning_curves": tuning_curves, "data": data, "epochs": epochs, - "bin_size": 1, + "bin_size": 1.0, } @@ -55,21 +55,21 @@ def get_testing_set_n(n_features=1, binned=False): {"tuning_curves": []}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + match="tuning_curves should be an xarray.DataArray as computed by compute_tuning_curves.", ), ), ( {"tuning_curves": 1}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + match="tuning_curves should be an xarray.DataArray as computed by compute_tuning_curves.", ), ), ( {"tuning_curves": get_testing_set_n()["tuning_curves"].to_pandas().T}, pytest.raises( TypeError, - match="tuning_curves should be an xr.DataArray as computed by compute_tuning_curves.", + match="tuning_curves should be an xarray.DataArray as computed by compute_tuning_curves.", ), ), ( @@ -156,6 +156,14 @@ def get_testing_set_n(n_features=1, binned=False): get_testing_set_n(3, binned=True), does_not_raise(), ), + # bin_size + ( + {"data": get_testing_set_n(binned=True)["data"], "bin_size": 2.0}, + pytest.raises( + ValueError, + match="passed bin_size too different from actual data bin size.", + ), + ), ], ) def test_decode_input_errors(overwrite_default_args, expectation): From 
9174a825df3cfb5c7b3e24bd2fdaa8373fbbf2f1 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Thu, 23 Oct 2025 19:10:48 +0000 Subject: [PATCH 190/244] fix bin_size test todo: add value tests for providing different bin sizes and units --- pynapple/process/decoding.py | 40 ++++++++++++++++++++---------------- tests/test_decoding.py | 18 ++++++++++++++++ 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 995ddc1ae..e49404eec 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -18,7 +18,9 @@ def _format_decoding_inputs(func): def wrapper(*args, **kwargs): # Validate each positional argument sig = inspect.signature(func) - kwargs = sig.bind_partial(*args, **kwargs).arguments + bound = sig.bind(*args, **kwargs) + bound.apply_defaults() + kwargs = bound.arguments # check tuning curves tuning_curves = kwargs["tuning_curves"] @@ -29,13 +31,27 @@ def wrapper(*args, **kwargs): # check data data = kwargs["data"] - if isinstance(data, nap.TsGroup): + if isinstance(data, nap.TsdFrame): + # check match bin_size + actual_bin_size = np.mean(data.time_diff().values) + passed_bin_size = kwargs["bin_size"] + if not isinstance(passed_bin_size, (int, float)): + raise ValueError("bin_size should be a number.") + if not np.isclose( + actual_bin_size, + nap.TsIndex.format_timestamps( + np.array([passed_bin_size], dtype=np.float64), + units=kwargs["time_units"], + ), + )[0]: + raise ValueError( + "passed bin_size too different from actual data bin size." 
+ ) + elif isinstance(data, nap.TsGroup): data = data.count( - kwargs["bin_size"], - kwargs.get("epochs", None), - kwargs.get("time_units", "s"), + kwargs["bin_size"], kwargs["epochs"], kwargs["time_units"] ) - elif not isinstance(data, nap.TsdFrame): + else: raise TypeError("Unknown format for data.") kwargs["data"] = data @@ -54,18 +70,6 @@ def wrapper(*args, **kwargs): "uniform_prior set to False but no occupancy found in tuning curves." ) - # check match data and bin_size - if isinstance(data, (nap.Tsd, nap.TsdFrame)): - actual_bin_size = np.mean(data.time_diff().values) - # actual_bin_size = nap.TsIndex.format_timestamps( - # np.array([np.mean(data.time_diff().values)], dtype=np.float64), - # time_units, - # )[0] - if not np.isclose(actual_bin_size, kwargs["bin_size"]): - raise ValueError( - "passed bin_size too different from actual data bin size." - ) - # Call the original function with validated inputs return func(**kwargs) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 74c6ec6da..c60283bae 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -157,6 +157,20 @@ def get_testing_set_n(n_features=1, binned=False): does_not_raise(), ), # bin_size + ( + {"data": get_testing_set_n(binned=True)["data"], "bin_size": None}, + pytest.raises( + ValueError, + match="bin_size should be a number.", + ), + ), + ( + {"data": get_testing_set_n(binned=True)["data"], "bin_size": "1.0"}, + pytest.raises( + ValueError, + match="bin_size should be a number.", + ), + ), ( {"data": get_testing_set_n(binned=True)["data"], "bin_size": 2.0}, pytest.raises( @@ -164,6 +178,10 @@ def get_testing_set_n(n_features=1, binned=False): match="passed bin_size too different from actual data bin size.", ), ), + ( + {"data": get_testing_set_n(binned=True)["data"], "bin_size": 1.0}, + does_not_raise(), + ), ], ) def test_decode_input_errors(overwrite_default_args, expectation): From 2f4c207be797ad95874a25f278daea72bdbe5eb6 Mon Sep 17 00:00:00 2001 From: wulfdewolf 
Date: Fri, 24 Oct 2025 08:30:19 +0000 Subject: [PATCH 191/244] testing bin_size and time_units for decoding --- doc/examples/tutorial_calcium_imaging.md | 1 + pynapple/process/decoding.py | 2 +- tests/test_decoding.py | 20 +++++++++++--------- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 243176b0d..6d978d9d1 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -161,6 +161,7 @@ For calcium imaging data, Pynapple has `decode_template`, which implements a tem ```{code-cell} ipython3 epochs = nap.IntervalSet([50, 150]) +transients = transients.bin_average(0.1, epochs) decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=transients, diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index e49404eec..ce9796b92 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -49,7 +49,7 @@ def wrapper(*args, **kwargs): ) elif isinstance(data, nap.TsGroup): data = data.count( - kwargs["bin_size"], kwargs["epochs"], kwargs["time_units"] + kwargs["bin_size"], kwargs["epochs"], time_units=kwargs["time_units"] ) else: raise TypeError("Unknown format for data.") diff --git a/tests/test_decoding.py b/tests/test_decoding.py index c60283bae..8ed103b20 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -9,7 +9,7 @@ import pynapple as nap -def get_testing_set_n(n_features=1, binned=False): +def get_testing_set_n(n_features=1, binned=False, bin_size=1.0, time_units="s"): combos = np.array(list(product([0, 1], repeat=n_features))) # (2^F, F) reps = 5 feature_data = np.tile(combos, (reps, 1)) # (T, F) @@ -26,7 +26,7 @@ def get_testing_set_n(n_features=1, binned=False): ) if binned: - frame = data.count(bin_size=1, ep=epochs) + frame = data.count(bin_size=bin_size, ep=epochs, time_units=time_units) data = nap.TsdFrame( frame.times() - 0.5, frame.values, @@ 
-42,7 +42,7 @@ def get_testing_set_n(n_features=1, binned=False): "tuning_curves": tuning_curves, "data": data, "epochs": epochs, - "bin_size": 1.0, + "bin_size": bin_size, } @@ -227,16 +227,17 @@ def test_decode_bayes_input_errors(overwrite_default_args, expectation): @pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def test_decode_bayes(n_features, binned, uniform_prior): +@pytest.mark.parametrize("bin_size, time_units", [(1.0, "s"), (1e3, "ms"), (1e6, "us")]) +def test_decode_bayes(n_features, binned, bin_size, time_units, uniform_prior): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( - n_features, binned=binned + n_features, binned=binned, bin_size=bin_size, time_units=time_units ).values() decoded, proba = nap.decode_bayes( tuning_curves=tuning_curves, data=data, epochs=epochs, bin_size=bin_size, - time_units="s", + time_units=time_units, uniform_prior=uniform_prior, ) @@ -258,9 +259,10 @@ def test_decode_bayes(n_features, binned, uniform_prior): @pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -def test_decode_template(n_features, binned, metric): +@pytest.mark.parametrize("bin_size, time_units", [(1.0, "s"), (1e3, "ms"), (1e6, "us")]) +def test_decode_template(metric, n_features, binned, bin_size, time_units): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( - n_features, binned=binned + n_features, binned=binned, bin_size=bin_size, time_units=time_units ).values() decoded, dist = nap.decode_template( tuning_curves=tuning_curves, @@ -268,7 +270,7 @@ def test_decode_template(n_features, binned, metric): epochs=epochs, metric=metric, bin_size=bin_size, - time_units="s", + time_units=time_units, ) assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) From 
11085e4aa02df4e8a476a28f50594233b6b58125 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 09:17:55 +0000 Subject: [PATCH 192/244] smoothing --- doc/examples/tutorial_calcium_imaging.md | 2 +- pynapple/process/decoding.py | 41 ++++++++++++++-- tests/test_decoding.py | 61 ++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 6 deletions(-) diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md index 6d978d9d1..396a8bcf1 100644 --- a/doc/examples/tutorial_calcium_imaging.md +++ b/doc/examples/tutorial_calcium_imaging.md @@ -161,7 +161,7 @@ For calcium imaging data, Pynapple has `decode_template`, which implements a tem ```{code-cell} ipython3 epochs = nap.IntervalSet([50, 150]) -transients = transients.bin_average(0.1, epochs) +transients = transients.bin_average(0.1) decoded, dist = nap.decode_template( tuning_curves=tuning_curves, data=transients, diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index ce9796b92..fda2c46a7 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -70,6 +70,21 @@ def wrapper(*args, **kwargs): "uniform_prior set to False but no occupancy found in tuning curves." 
) + # smooth + smoothing = kwargs["smoothing"] + smoothing_window = kwargs["smoothing_window"] + if smoothing is not None: + if smoothing not in ["gaussian", "uniform"]: + raise ValueError("smoothing should be one of 'gaussian' or 'uniform'.") + if not isinstance(smoothing_window, (int, float)): + raise ValueError("smoothing_window should be a number.") + if smoothing_window == "gaussian": + data = data.smooth(smoothing_window, time_units=kwargs["time_units"]) + else: + data = data.convolve( + np.ones(int(smoothing_window / kwargs["bin_size"])) + ) + # Call the original function with validated inputs return func(**kwargs) @@ -137,7 +152,14 @@ def _format_decoding_outputs(dist, tuning_curves, data, epochs, greater_is_bette @_format_decoding_inputs def decode_bayes( - tuning_curves, data, epochs, bin_size, time_units="s", uniform_prior=True + tuning_curves, + data, + epochs, + bin_size, + smoothing=None, + smoothing_window=None, + time_units="s", + uniform_prior=True, ): """ Performs Bayesian decoding over n-dimensional features. @@ -190,9 +212,13 @@ def decode_bayes( epochs : IntervalSet The epochs on which decoding is computed bin_size : float - Bin size. Default is second. Use ``time_units`` to change it. + Bin size. Default in seconds. Use ``time_units`` to change it. + smoothing : str, optional + Type of smoothing to apply to the binned spikes counts (``None`` [default], ``gaussian``, ``uniform``). + smoothing_window : float, optional + Size smoothing window. Default in seconds. Use ``time_units`` to change it. time_units : str, optional - Time unit of the bin size ('s' [default], 'ms', 'us'). + Time unit of the bin size (``s`` [default], ``ms``, ``us``). uniform_prior : bool, optional If True (default), uses a uniform distribution as a prior. 
If False, uses the occupancy from the tuning curves as a prior over the feature @@ -378,6 +404,8 @@ def decode_template( epochs, bin_size, metric="correlation", + smoothing=None, + smoothing_window=None, time_units="s", ): """ @@ -437,9 +465,12 @@ def decode_template( If a callable, it must have the signature ``metric(u, v) -> float`` and return the distance between two 1D arrays. - + smoothing : str, optional + Type of smoothing to apply to the binned spikes counts (``None`` [default], ``gaussian``, ``uniform``). + smoothing_window : float, optional + Size smoothing window. Default in seconds. Use ``time_units`` to change it. time_units : str, optional - Time unit of the bin size ('s' [default], 'ms', 'us'). + Time unit of the bin size (``s`` [default], ``ms``, ``us``). Returns ------- diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 8ed103b20..d6f05bf20 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -156,6 +156,67 @@ def get_testing_set_n(n_features=1, binned=False, bin_size=1.0, time_units="s"): get_testing_set_n(3, binned=True), does_not_raise(), ), + # smoothing + ( + {"smoothing": "1"}, + pytest.raises( + ValueError, + match="smoothing should be one of 'gaussian' or 'uniform'.", + ), + ), + ( + {"smoothing": 1}, + pytest.raises( + ValueError, + match="smoothing should be one of 'gaussian' or 'uniform'.", + ), + ), + ( + {"smoothing": "gaussian", "smoothing_window": 1}, + does_not_raise(), + ), + ( + {"smoothing": "uniform", "smoothing_window": 1}, + does_not_raise(), + ), + ( + { + "data": get_testing_set_n(binned=True)["data"], + "smoothing": "gaussian", + "smoothing_window": 1, + }, + does_not_raise(), + ), + ( + { + **get_testing_set_n(2, binned=True), + "smoothing": "gaussian", + "smoothing_window": 1, + }, + does_not_raise(), + ), + # smoothing_window + ( + {"smoothing": "gaussian"}, + pytest.raises( + ValueError, + match="smoothing_window should be a number.", + ), + ), + ( + {"smoothing": "gaussian", 
"smoothing_window": "1"}, + pytest.raises( + ValueError, + match="smoothing_window should be a number.", + ), + ), + ( + {"smoothing": "gaussian", "smoothing_window": []}, + pytest.raises( + ValueError, + match="smoothing_window should be a number.", + ), + ), # bin_size ( {"data": get_testing_set_n(binned=True)["data"], "bin_size": None}, From 879e880e4e4648f414ea9b8e9ba4268a92efb220 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 09:35:06 +0000 Subject: [PATCH 193/244] user guide --- doc/user_guide/07_decoding.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index d86f3ad67..3b3ca0c47 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -32,6 +32,8 @@ Input to both decoding functions always includes: - `tuning_curves`, computed using [`compute_tuning_curves`](pynapple.process.tuning_curves.compute_tuning_curves). - `data`, neural activity as a `TsGroup` (spikes) or `TsdFrame` (smoothed counts or calcium activity or any other time series). - `epochs`, to restrict decoding to certain intervals. + - `smoothing`, type of smoothing to apply to `data`, defaults to `None`, indicating no smoothing, but can be `gaussian` or `uniform`. + - `smoothing_window`, smoothing window to use if `smoothing` is provided. - `bin_size`, the size of the bins in which to count timestamps when data is a `TsGroup` object. - `time_units`, the units of `bin_size`, defaulting to seconds. @@ -40,7 +42,7 @@ When using Bayesian decoding, users can additionally set `uniform_prior=False` t By default `uniform_prior=True`, and a uniform prior is used. :::{important} -Bayesian decoding should only be used with spike or rate data, as these can be assumed to follow a Poisson distribution! +Bayesian decoding should only be used with spike (`TsGroup`) or spike count (`TsdFrame`) data, as these can be assumed to follow a Poisson distribution! 
::: @@ -401,7 +403,7 @@ decoded, dist = nap.decode_template( tuning_curves=tuning_curves_2d, data=tsdframe, epochs=epochs, - bin_size=0.2, + bin_size=0.01, metric="correlation" ) ``` From be66ccce54401c4ea2e0e053cb3a504c886294af Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 10:09:26 +0000 Subject: [PATCH 194/244] add smoothing to user guide --- doc/user_guide/07_decoding.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md index 3b3ca0c47..ba98a2b12 100644 --- a/doc/user_guide/07_decoding.md +++ b/doc/user_guide/07_decoding.md @@ -98,13 +98,17 @@ tuning_curves_1d.plot.line(x="Circular feature", add_legend=False) plt.show() ``` -We can then use `nap.decode_bayes` for Bayesian decoding: +We can then use `nap.decode_bayes` for Bayesian decoding. +We will use the `smoothing` and `smoothing_window` arguments to additionally smooth the +spike counts, this often helps with decoding: ```{code-cell} ipython3 decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves_1d, data=tsgroup, epochs=epochs, + smoothing="gaussian", + smoothing_window=0.1, bin_size=0.06, ) ``` @@ -201,7 +205,9 @@ decoded, proba_feature = nap.decode_bayes( tuning_curves=tuning_curves_2d, data=ts_group, epochs=epochs, - bin_size=0.2, + smoothing="gaussian", + smoothing_window=0.2, + bin_size=0.1, ) ``` From 465eebb1f390ed0366b8188cd9719938b44ea736 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 10:10:32 +0000 Subject: [PATCH 195/244] smooth over epochs --- pynapple/process/decoding.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index fda2c46a7..f8dfc904c 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -79,10 +79,15 @@ def wrapper(*args, **kwargs): if not isinstance(smoothing_window, (int, float)): raise ValueError("smoothing_window should be a 
number.") if smoothing_window == "gaussian": - data = data.smooth(smoothing_window, time_units=kwargs["time_units"]) + data = data.smooth( + smoothing_window, + time_units=kwargs["time_units"], + ep=kwargs["epochs"], + ) else: data = data.convolve( - np.ones(int(smoothing_window / kwargs["bin_size"])) + np.ones(int(smoothing_window / kwargs["bin_size"])), + ep=kwargs["epochs"], ) # Call the original function with validated inputs From 2c74601c63227c4e9e4c0a9779b543f9b353eba2 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 10:19:57 +0000 Subject: [PATCH 196/244] add smoothing tests --- tests/test_decoding.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index d6f05bf20..f5c7ba14f 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -288,8 +288,14 @@ def test_decode_bayes_input_errors(overwrite_default_args, expectation): @pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -@pytest.mark.parametrize("bin_size, time_units", [(1.0, "s"), (1e3, "ms"), (1e6, "us")]) -def test_decode_bayes(n_features, binned, bin_size, time_units, uniform_prior): +@pytest.mark.parametrize("smoothing", ["gaussian", "uniform"]) +@pytest.mark.parametrize( + "bin_size, smoothing_window, time_units", + [(1.0, 2.0, "s"), (1e3, 2e3, "ms"), (1e6, 2e6, "us")], +) +def test_decode_bayes( + n_features, binned, bin_size, smoothing, smoothing_window, time_units, uniform_prior +): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( n_features, binned=binned, bin_size=bin_size, time_units=time_units ).values() @@ -298,6 +304,8 @@ def test_decode_bayes(n_features, binned, bin_size, time_units, uniform_prior): data=data, epochs=epochs, bin_size=bin_size, + smoothing=smoothing, + smoothing_window=smoothing_window, time_units=time_units, 
uniform_prior=uniform_prior, ) @@ -320,8 +328,14 @@ def test_decode_bayes(n_features, binned, bin_size, time_units, uniform_prior): @pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -@pytest.mark.parametrize("bin_size, time_units", [(1.0, "s"), (1e3, "ms"), (1e6, "us")]) -def test_decode_template(metric, n_features, binned, bin_size, time_units): +@pytest.mark.parametrize("smoothing", [None, "gaussian", "uniform"]) +@pytest.mark.parametrize( + "bin_size, smoothing_window, time_units", + [(1.0, 2.0, "s"), (1e3, 2e3, "ms"), (1e6, 2e6, "us")], +) +def test_decode_template( + metric, n_features, binned, bin_size, smoothing, smoothing_window, time_units +): features, tuning_curves, data, epochs, bin_size = get_testing_set_n( n_features, binned=binned, bin_size=bin_size, time_units=time_units ).values() @@ -331,6 +345,8 @@ def test_decode_template(metric, n_features, binned, bin_size, time_units): epochs=epochs, metric=metric, bin_size=bin_size, + smoothing=smoothing, + smoothing_window=smoothing_window, time_units=time_units, ) From 9ecbae0650864acc551a827ee10a3fe5b4e14c11 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Fri, 24 Oct 2025 10:28:08 +0000 Subject: [PATCH 197/244] correctly smooth --- pynapple/process/decoding.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index f8dfc904c..f8bb07926 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -78,11 +78,10 @@ def wrapper(*args, **kwargs): raise ValueError("smoothing should be one of 'gaussian' or 'uniform'.") if not isinstance(smoothing_window, (int, float)): raise ValueError("smoothing_window should be a number.") - if smoothing_window == "gaussian": + if smoothing == "gaussian": data = data.smooth( smoothing_window, time_units=kwargs["time_units"], - ep=kwargs["epochs"], ) else: 
data = data.convolve( From cbf30866fcd08d57a335ca1485eb03d0aa37979e Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 27 Oct 2025 15:34:54 +0000 Subject: [PATCH 198/244] switch to FutureWarning + normalise when convolving continuous data in decoding --- pynapple/process/decoding.py | 11 ++++++++--- pynapple/process/tuning_curves.py | 14 +++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index f8bb07926..703801d41 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -31,6 +31,7 @@ def wrapper(*args, **kwargs): # check data data = kwargs["data"] + was_continuous = True if isinstance(data, nap.TsdFrame): # check match bin_size actual_bin_size = np.mean(data.time_diff().values) @@ -51,6 +52,7 @@ def wrapper(*args, **kwargs): data = data.count( kwargs["bin_size"], kwargs["epochs"], time_units=kwargs["time_units"] ) + was_continuous = True else: raise TypeError("Unknown format for data.") kwargs["data"] = data @@ -84,10 +86,13 @@ def wrapper(*args, **kwargs): time_units=kwargs["time_units"], ) else: + smoothing_window_bins = int(smoothing_window / kwargs["bin_size"]) data = data.convolve( - np.ones(int(smoothing_window / kwargs["bin_size"])), + np.ones(smoothing_window_bins), ep=kwargs["epochs"], ) + if was_continuous: + data = data / smoothing_window_bins # Call the original function with validated inputs return func(**kwargs) @@ -645,7 +650,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): """ warnings.warn( "decode_1d is deprecated and will be removed in a future version; use decode_bayes instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) # Occupancy @@ -685,7 +690,7 @@ def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=N """ warnings.warn( "decode_2d is deprecated and will be removed in a future version; use decode_bayes instead.", - DeprecationWarning, + FutureWarning, 
stacklevel=2, ) # Occupancy diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index 7d50bd1c6..d220c044d 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -601,7 +601,7 @@ def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): warnings.warn( "compute_1d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) return ( @@ -629,7 +629,7 @@ def compute_1d_tuning_curves_continuous( warnings.warn( "compute_1d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) return ( @@ -655,7 +655,7 @@ def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): warnings.warn( "compute_2d_tuning_curves is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) xarray = compute_tuning_curves( @@ -684,7 +684,7 @@ def compute_2d_tuning_curves_continuous( warnings.warn( "compute_2d_tuning_curves_continuous is deprecated and will be removed in a future version;" "use compute_tuning_curves instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) xarray = compute_tuning_curves( @@ -711,7 +711,7 @@ def compute_discrete_tuning_curves(group, dict_ep): warnings.warn( "compute_discrete_tuning_curves is deprecated and will be removed in a future version;" "use compute_response_per_epoch instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) @@ -728,7 +728,7 @@ def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=Fals warnings.warn( "compute_2d_mutual_info is deprecated and will be removed in a future version;" "use compute_mutual_information instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) if type(dict_tc) is dict: 
@@ -785,7 +785,7 @@ def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): warnings.warn( "compute_1d_mutual_info is deprecated and will be removed in a future version;" "use compute_mutual_information instead.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) if isinstance(tc, pd.DataFrame): From e2f68297b0f25ad75c1e571e66aba6b75bc9b733 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 27 Oct 2025 19:41:29 +0000 Subject: [PATCH 199/244] fix smoothing tests --- pynapple/process/decoding.py | 6 ++++-- tests/test_decoding.py | 9 +++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 703801d41..6e1d3e808 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -55,7 +55,6 @@ def wrapper(*args, **kwargs): was_continuous = True else: raise TypeError("Unknown format for data.") - kwargs["data"] = data # check match tuning curves and data if tuning_curves.sizes["unit"] != data.shape[1]: @@ -86,13 +85,16 @@ def wrapper(*args, **kwargs): time_units=kwargs["time_units"], ) else: - smoothing_window_bins = int(smoothing_window / kwargs["bin_size"]) + smoothing_window_bins = max( + 1, int(smoothing_window / kwargs["bin_size"]) + ) data = data.convolve( np.ones(smoothing_window_bins), ep=kwargs["epochs"], ) if was_continuous: data = data / smoothing_window_bins + kwargs["data"] = data # Call the original function with validated inputs return func(**kwargs) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index f5c7ba14f..8a8d6c6db 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -288,10 +288,10 @@ def test_decode_bayes_input_errors(overwrite_default_args, expectation): @pytest.mark.parametrize("uniform_prior", [True, False]) @pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -@pytest.mark.parametrize("smoothing", ["gaussian", "uniform"]) 
+@pytest.mark.parametrize("smoothing", [None, "gaussian", "uniform"]) @pytest.mark.parametrize( "bin_size, smoothing_window, time_units", - [(1.0, 2.0, "s"), (1e3, 2e3, "ms"), (1e6, 2e6, "us")], + [(1.0, 0.5, "s"), (1e3, 5e2, "ms"), (1e6, 5e5, "us")], ) def test_decode_bayes( n_features, binned, bin_size, smoothing, smoothing_window, time_units, uniform_prior @@ -309,6 +309,7 @@ def test_decode_bayes( time_units=time_units, uniform_prior=uniform_prior, ) + features = decoded.value_from(features, ep=decoded.time_support, mode="before") assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) @@ -317,7 +318,7 @@ def test_decode_bayes( proba, nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, ) - expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) + expected_proba = np.zeros((len(features), *tuning_curves.shape[0:])) target_indices = [np.arange(len(features))] + [ features[:, d] for d in range(features.shape[1]) ] @@ -331,7 +332,7 @@ def test_decode_bayes( @pytest.mark.parametrize("smoothing", [None, "gaussian", "uniform"]) @pytest.mark.parametrize( "bin_size, smoothing_window, time_units", - [(1.0, 2.0, "s"), (1e3, 2e3, "ms"), (1e6, 2e6, "us")], + [(1.0, 0.5, "s"), (1e3, 5e2, "ms"), (1e6, 5e5, "us")], ) def test_decode_template( metric, n_features, binned, bin_size, smoothing, smoothing_window, time_units From 5535564c059c45535ddc1f7c91684e0572fdaaab Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 27 Oct 2025 19:49:41 +0000 Subject: [PATCH 200/244] shape fix --- tests/test_decoding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index 8a8d6c6db..c707201ca 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -318,7 +318,7 @@ def test_decode_bayes( proba, nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, ) - expected_proba = 
np.zeros((len(features), *tuning_curves.shape[0:])) + expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) target_indices = [np.arange(len(features))] + [ features[:, d] for d in range(features.shape[1]) ] From ed7ad7d6e13aa0659ae8f7d57aee006ed4a3fda1 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 27 Oct 2025 19:51:58 +0000 Subject: [PATCH 201/244] fix version numbers to 1.0.0 --- pynapple/process/decoding.py | 4 ++-- pynapple/process/tuning_curves.py | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py index 6e1d3e808..49db57ff9 100644 --- a/pynapple/process/decoding.py +++ b/pynapple/process/decoding.py @@ -647,7 +647,7 @@ def decode_template( def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): """ .. deprecated:: 0.9.2 - `decode_1d` will be removed in Pynapple 0.10.0, it is replaced by + `decode_1d` will be removed in Pynapple 1.0.0, it is replaced by `decode_bayes` because the latter works for N dimensions. """ warnings.warn( @@ -687,7 +687,7 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None): def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None): """ .. deprecated:: 0.9.2 - `decode_2d` will be removed in Pynapple 0.10.0, it is replaced by + `decode_2d` will be removed in Pynapple 1.0.0, it is replaced by `decode_bayes` because the latter works for N dimensions. """ warnings.warn( diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py index d220c044d..867d3868e 100644 --- a/pynapple/process/tuning_curves.py +++ b/pynapple/process/tuning_curves.py @@ -595,7 +595,7 @@ def wrapper(*args, **kwargs): def compute_1d_tuning_curves(group, feature, nb_bins, ep=None, minmax=None): """ .. 
deprecated:: 0.9.2 - `compute_1d_tuning_curves` will be removed in Pynapple 0.10.0, it is replaced by + `compute_1d_tuning_curves` will be removed in Pynapple 1.0.0, it is replaced by `compute_tuning_curves` because the latter works for N dimensions. """ warnings.warn( @@ -623,7 +623,7 @@ def compute_1d_tuning_curves_continuous( ): """ .. deprecated:: 0.9.2 - `compute_1d_tuning_curves` will be removed in Pynapple 0.10.0, it is replaced by + `compute_1d_tuning_curves` will be removed in Pynapple 1.0.0, it is replaced by `compute_tuning_curves` because the latter works for N dimensions and continuous data. """ warnings.warn( @@ -649,7 +649,7 @@ def compute_1d_tuning_curves_continuous( def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None): """ .. deprecated:: 0.9.2 - `compute_1d_tuning_curves` will be removed in Pynapple 0.10.0, it is replaced by + `compute_1d_tuning_curves` will be removed in Pynapple 1.0.0, it is replaced by `compute_tuning_curves` because the latter works for N dimensions. """ warnings.warn( @@ -678,7 +678,7 @@ def compute_2d_tuning_curves_continuous( ): """ .. deprecated:: 0.9.2 - `compute_1d_tuning_curves` will be removed in Pynapple 0.10.0, it is replaced by + `compute_1d_tuning_curves` will be removed in Pynapple 1.0.0, it is replaced by `compute_tuning_curves` because the latter works for N dimensions and continuous data. """ warnings.warn( @@ -705,7 +705,7 @@ def compute_2d_tuning_curves_continuous( def compute_discrete_tuning_curves(group, dict_ep): """ .. deprecated:: 0.9.2 - `compute_discrete_tuning_curves` will be removed in Pynapple 0.10.0, it is replaced by + `compute_discrete_tuning_curves` will be removed in Pynapple 1.0.0, it is replaced by `compute_response_per_epoch`. """ warnings.warn( @@ -722,7 +722,7 @@ def compute_discrete_tuning_curves(group, dict_ep): def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False): """ .. 
deprecated:: 0.9.2 - `compute_2d_mutual_info` will be removed in Pynapple 0.10.0, it is replaced by + `compute_2d_mutual_info` will be removed in Pynapple 1.0.0, it is replaced by `compute_mutual_information` because the latter works for N dimensions. """ warnings.warn( @@ -779,7 +779,7 @@ def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=Fals def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False): """ .. deprecated:: 0.9.2 - `compute_1d_mutual_info` will be removed in Pynapple 0.10.0, it is replaced by + `compute_1d_mutual_info` will be removed in Pynapple 1.0.0, it is replaced by `compute_mutual_information` because the latter works for N dimensions. """ warnings.warn( From eaeb1abcbfbf75cbbd7154fea955bdea1dec8d39 Mon Sep 17 00:00:00 2001 From: wulfdewolf Date: Mon, 27 Oct 2025 21:44:41 +0000 Subject: [PATCH 202/244] add tests for smoothing --- tests/test_decoding.py | 73 +++++++++++++++++++++++++++++------------- 1 file changed, 51 insertions(+), 22 deletions(-) diff --git a/tests/test_decoding.py b/tests/test_decoding.py index c707201ca..3d34c7702 100644 --- a/tests/test_decoding.py +++ b/tests/test_decoding.py @@ -36,6 +36,7 @@ def get_testing_set_n(n_features=1, binned=False, bin_size=1.0, time_units="s"): tuning_curves = nap.compute_tuning_curves( data, features, bins=2, range=[(-0.5, 1.5)] * n_features ) + tuning_curves.values += 1e-12 return { "features": features, @@ -286,12 +287,27 @@ def test_decode_bayes_input_errors(overwrite_default_args, expectation): @pytest.mark.parametrize("uniform_prior", [True, False]) -@pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) @pytest.mark.parametrize("smoothing", [None, "gaussian", "uniform"]) @pytest.mark.parametrize( - "bin_size, smoothing_window, time_units", - [(1.0, 0.5, "s"), (1e3, 5e2, "ms"), (1e6, 5e5, "us")], + "n_features, bin_size, smoothing_window, time_units", + [ + (1, 1.0, 0.5, "s"), + (2, 1.0, 0.5, "s"), + 
(3, 1.0, 0.5, "s"), + (2, 1.0, 1.1, "s"), + (3, 1.0, 1.1, "s"), + (1, 1e3, 1e2, "ms"), + (2, 1e3, 1e2, "ms"), + (3, 1e3, 1e2, "ms"), + (2, 1e3, 1.1e3, "ms"), + (3, 1e3, 1.1e3, "ms"), + (1, 1e6, 1e5, "us"), + (2, 1e6, 1e5, "us"), + (3, 1e6, 1e5, "us"), + (2, 1e6, 1.1e6, "us"), + (3, 1e6, 1.1e6, "us"), + ], ) def test_decode_bayes( n_features, binned, bin_size, smoothing, smoothing_window, time_units, uniform_prior @@ -309,30 +325,44 @@ def test_decode_bayes( time_units=time_units, uniform_prior=uniform_prior, ) - features = decoded.value_from(features, ep=decoded.time_support, mode="before") - assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) - assert isinstance( - proba, - nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, - ) - expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) - target_indices = [np.arange(len(features))] + [ - features[:, d] for d in range(features.shape[1]) - ] - expected_proba[tuple(target_indices)] = 1.0 - np.testing.assert_array_almost_equal(proba.values, expected_proba) + if smoothing is None or smoothing_window < bin_size: + assert isinstance( + proba, + nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, + ) + expected_proba = np.zeros((len(features), *tuning_curves.shape[1:])) + target_indices = [np.arange(len(features))] + [ + features[:, d] for d in range(features.shape[1]) + ] + expected_proba[tuple(target_indices)] = 1.0 + np.testing.assert_array_almost_equal(proba.values, expected_proba) @pytest.mark.parametrize("metric", ["correlation", "euclidean", "cosine"]) -@pytest.mark.parametrize("n_features", [1, 2, 3]) @pytest.mark.parametrize("binned", [True, False]) -@pytest.mark.parametrize("smoothing", [None, "gaussian", "uniform"]) +@pytest.mark.parametrize("smoothing", ["gaussian", "uniform"]) @pytest.mark.parametrize( - "bin_size, smoothing_window, time_units", - [(1.0, 0.5, "s"), (1e3, 5e2, 
"ms"), (1e6, 5e5, "us")], + "n_features, bin_size, smoothing_window, time_units", + [ + (1, 1.0, 0.5, "s"), + (2, 1.0, 0.5, "s"), + (3, 1.0, 0.5, "s"), + (2, 1.0, 1.1, "s"), + (3, 1.0, 1.1, "s"), + (1, 1e3, 1e2, "ms"), + (2, 1e3, 1e2, "ms"), + (3, 1e3, 1e2, "ms"), + (2, 1e3, 1.1e3, "ms"), + (3, 1e3, 1.1e3, "ms"), + (1, 1e6, 1e5, "us"), + (2, 1e6, 1e5, "us"), + (3, 1e6, 1e5, "us"), + (2, 1e6, 1.1e6, "us"), + (3, 1e6, 1.1e6, "us"), + ], ) def test_decode_template( metric, n_features, binned, bin_size, smoothing, smoothing_window, time_units @@ -350,15 +380,14 @@ def test_decode_template( smoothing_window=smoothing_window, time_units=time_units, ) - assert isinstance(decoded, nap.Tsd if features.shape[1] == 1 else nap.TsdFrame) - np.testing.assert_array_almost_equal(decoded.values, features.values.squeeze()) - assert isinstance( dist, nap.TsdFrame if features.shape[1] == 1 else nap.TsdTensor, ) + np.testing.assert_allclose(decoded.values, features.values.squeeze()) + # ------------------------------------------------------------------------------------ # OLD DECODING TESTS From 617b9c98b0fa38728b145fec34af536638099a3a Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Mon, 27 Oct 2025 18:01:55 -0400 Subject: [PATCH 203/244] Updating releases.md --- doc/releases.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/releases.md b/doc/releases.md index 1458675f7..86a7a7ab8 100644 --- a/doc/releases.md +++ b/doc/releases.md @@ -1,5 +1,21 @@ # Releases +### 0.10.0 (2025-10-27) + +- Generalizing `nap.compute_tuning_curves`. It can take any time series object (Tsd, TsdFrame, TsGroup, TsdTensor) as input and + work for any dimension of data. +- `nap.compute_1d_tuning_curve`, `nap.compute_2d_tuning_curve`, `nap.compute_1d_tuning_curve_continuous`, `nap.compute_2d_tuning_curve_continuous` + are being deprecated in favor of the general `nap.compute_tuning_curves`. 
+- Generalization of `nap.decode_1d` and `nap.decode_2d` to `nap.decode_bayes` for Bayesian decoding of any dimension of data. +- New function `nap.decode_template` for template matching decoding of any dimension of data. +- Metadata can be restricted with `restrict_info`. +- New function `detect_oscillatory_events` to detect oscillatory events in a Tsd object. +- Fix TsdFrame `__repr__` for boolean data type. +- Refactoring of `nap.compute_mutual_information` to take as input xarray tuning curves object. +- `in_interval` method for IntervalSet to check if time points are within intervals. +- Refactoring `nap.compute_discrete_tuning_curves` to `compute_response_per_epoch`. +- Tuning curves functions can return spike counts and occupancy separately. + ### 0.9.2 (2025-06-16) - Implement `time_diff` method for time series objects From 6e7a6ea6f1b3b9c01141c135cffe436acb10219d Mon Sep 17 00:00:00 2001 From: Guillaume Viejo Date: Tue, 28 Oct 2025 10:40:35 -0400 Subject: [PATCH 204/244] Update readme --- README.md | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index bfcbd7a8a..5e6bd8675 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,12 @@ pynapple is a light-weight python library for neurophysiological data analysis. New release :fire: ------------------ +### pynapple >= 0.10.0 + +Tuning curves computation has been generalized with the function `compute_tuning_curves`. +It can now return a [xarray DataArray](https://docs.xarray.dev/en/stable/) instead of a Pandas DataFrame. + + ### pynapple >= 0.8.2 The objects `IntervalSet`, `TsdFrame` and `TsGroup` inherits a new metadata class. It is now possible to add labels for @@ -38,27 +44,6 @@ nap.apply_bandpass_filter(signal, (10, 20), fs=1250) ``` New functions includes power spectral density and Morlet wavelet decomposition. See the [documentation](https://pynapple-org.github.io/pynapple/reference/process/) for more details. 
-### pynapple >= 0.6 - -Starting with 0.6, [`IntervalSet`](https://pynapple-org.github.io/pynapple/reference/core/interval_set/) objects are behaving as immutable numpy ndarray. Before 0.6, you could select an interval within an `IntervalSet` object with: - -```python -new_intervalset = intervalset.loc[[0]] # Selecting first interval -``` - -With pynapple>=0.6, the slicing is similar to numpy and it returns an `IntervalSet` - -```python -new_intervalset = intervalset[0] -``` - -### pynapple >= 0.4 - -Starting with 0.4, pynapple rely on the [numpy array container](https://numpy.org/doc/stable/user/basics.dispatch.html) approach instead of Pandas for the time series. Pynapple builtin functions will remain the same except for functions inherited from Pandas. - -This allows for a better handling of returned objects. - -Additionaly, it is now possible to define time series objects with more than 2 dimensions with `TsdTensor`. You can also look at this [notebook](https://pynapple-org.github.io/pynapple/generated/gallery/tutorial_pynapple_numpy/) for a demonstration of numpy compatibilities. Community --------- @@ -73,13 +58,11 @@ Getting Started The best way to install pynapple is with pip inside a new [conda](https://docs.conda.io/en/latest/) environment: ``` {.sourceCode .shell} -$ conda create --name pynapple pip python=3.8 +$ conda create --name pynapple pip python=3.11 $ conda activate pynapple $ pip install pynapple ``` -> **Note** -> The package uses a pyproject.toml file for installation and dependencies management. Running `pip install pynapple` will install all the dependencies, including: @@ -90,13 +73,14 @@ Running `pip install pynapple` will install all the dependencies, including: - pynwb 2.0 - tabulate - h5py +- xarray For development, see the [contributor guide](CONTRIBUTING.md) for steps to install from source code.