Changes from all commits
49 commits
de4a3fb
resonatorV2 first version
Marius-Frohn May 6, 2025
a759dc7
First untested version
Marius-Frohn May 7, 2025
f4f7a03
Bug fixes automatic circle fit
Marius-Frohn May 8, 2025
cbca9af
Added autofit file
Marius-Frohn May 12, 2025
bc70cf7
fug bixes
Marius-Frohn May 12, 2025
4d3a754
Fore mug bixes
Marius-Frohn May 12, 2025
395f73f
Added untested unified logging
Marius-Frohn May 13, 2025
2af58bb
merge conflict with extra sleep
Marius-Frohn May 13, 2025
5b38efb
fug bixes
Marius-Frohn May 13, 2025
6317247
race conditions?
Marius-Frohn May 14, 2025
b35d583
suppressed circlefit warning amount
Marius-Frohn May 15, 2025
5a9527e
WIP
Marius-Frohn May 16, 2025
ddb4144
WIP
Marius-Frohn May 16, 2025
e444ef9
WIP
Marius-Frohn May 19, 2025
6038eef
Bug fixes + tested
Marius-Frohn May 19, 2025
a7ebeca
Fix 1D log trying to access non-existent dataset
Marius-Frohn May 22, 2025
8995af7
added storeRealImag parameter to spectroscopy to roughly half file si…
Marius-Frohn May 26, 2025
63dc862
merge
Marius-Frohn May 26, 2025
14f64fd
Added fano + (skewed) lorentzian maths as comments
Marius-Frohn May 27, 2025
228668a
set_log_function backwards compatibility, untested
Marius-Frohn May 27, 2025
94cbfb9
restored old resonator/circlefit implementations, made new resonator …
Marius-Frohn May 27, 2025
b3258f0
Revert hdf changes and adjust logging coordinates access
Marius-Frohn May 28, 2025
29c531a
storeRealImag default view bugfix
Marius-Frohn May 28, 2025
66e6b97
Create double_vta.py
Marius-Frohn May 28, 2025
be57e44
transport logfunc bugfix
Marius-Frohn Jun 4, 2025
0d9bf25
VTe added
Marius-Frohn Jun 10, 2025
c13f75c
Added effective min/max ranges calculation
Marius-Frohn Jun 11, 2025
67c4f88
Bugfixes
Marius-Frohn Jun 18, 2025
843afe4
Twosided measurement routine skeleton
Marius-Frohn Jun 20, 2025
6cf9236
Twoside prepare_file
Marius-Frohn Jun 26, 2025
e1fb779
Complete Twoside measure routine, untested
Marius-Frohn Jun 27, 2025
c50649c
Refactored ADWinProII driver and added DoubleSideSweep support
Marius-Frohn Jun 30, 2025
9c983cd
Merge branch 'DoubleVirtualTunnelElectronics' into 139-resonator-clas…
Marius-Frohn Jun 30, 2025
ad53bee
Added logging to twoside measure
Marius-Frohn Jun 30, 2025
7164792
merges
Marius-Frohn Jul 1, 2025
726b06f
Twoside Bugfixes
Marius-Frohn Jul 1, 2025
1e59db6
Keysight: 2-Channel I/V/R/P value address fix. Transport/Spectroscopy…
Marius-Frohn Jul 8, 2025
83df443
Explicit logging-handler close-file
Marius-Frohn Jul 8, 2025
e7dbf34
ADwin DoubleSweep BugFixes
Marius-Frohn Jul 9, 2025
143a893
Merge branch 'master' into 139-resonator-class-support-3d-measurements
Marius-Frohn Aug 11, 2025
54c2088
Merge branch 'master' into 139-resonator-class-support-3d-measurements
Marius-Frohn Aug 18, 2025
26adf3c
Merge remote-tracking branch 'origin/master'
Marius-Frohn Sep 26, 2025
0afd721
Circlefit less warnings, fix Keysight IV/VI sorting, minor spectrosco…
Marius-Frohn Sep 30, 2025
9d6b77b
Merge branch 'master' into 139-resonator-class-support-3d-measurements
Marius-Frohn Sep 30, 2025
2610c90
unified measure bugfixes
Marius-Frohn Sep 30, 2025
7ef3cc3
Transport + Deriv Bugfixes
Marius-Frohn Sep 30, 2025
79982e0
WIP
Marius-Frohn Oct 16, 2025
44b5124
WIPWIP
Marius-Frohn Oct 17, 2025
f355c10
AAAA
Marius-Frohn Oct 21, 2025
4 changes: 3 additions & 1 deletion src/qkit/analysis/circle_fit/circle_fit_2019/circuit.py
@@ -419,14 +419,16 @@ def _fit_phase(self, z_data, guesses=None):
phase = np.unwrap(np.angle(z_data))

# For centered circle roll-off should be close to 2pi. If not warn user.
if np.max(phase) - np.min(phase) <= 0.8*2*np.pi:
if np.max(phase) - np.min(phase) <= 0.4*2*np.pi:
logging.warning(
"Data does not cover a full circle (only {:.1f}".format(
np.max(phase) - np.min(phase)
)
+" rad). Increase the frequency span around the resonance?"
)
roll_off = np.max(phase) - np.min(phase)
elif np.max(phase) - np.min(phase) <= 0.8*2*np.pi:
roll_off = np.max(phase) - np.min(phase)
else:
roll_off = 2*np.pi
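Consolidated, the new roll-off selection reads as follows — a minimal standalone sketch of the branching above (the helper name _select_roll_off is hypothetical; in the class this logic lives inside _fit_phase):

import logging
import numpy as np

def _select_roll_off(phase: np.ndarray) -> float:
    # phase is the unwrapped phase of z_data, as computed at the top of _fit_phase
    span = np.max(phase) - np.min(phase)
    if span <= 0.4 * 2 * np.pi:
        # very little coverage: warn and fall back to the measured span
        logging.warning(
            "Data does not cover a full circle (only {:.1f} rad)."
            " Increase the frequency span around the resonance?".format(span)
        )
        return span
    elif span <= 0.8 * 2 * np.pi:
        # partial coverage: use the measured span, but without warning
        return span
    # near-complete circle: assume the ideal 2*pi roll-off
    return 2 * np.pi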

91 changes: 91 additions & 0 deletions src/qkit/analysis/crit_detection_iv.py
@@ -0,0 +1,91 @@
import itertools
from typing import override, Any, Literal
from enum import Enum
from scipy import signal
import numpy as np

from qkit.measure.unified_measurements import AnalysisTypeAdapter, MeasurementTypeAdapter, DataView, DataViewSet, DataReference
from qkit.analysis.numerical_derivative import SavgolNumericalDerivative

class CritDetectionIV(AnalysisTypeAdapter):
"""
Analyzes critical points of IV/VI curves

Analysis results are '(i/v)c_lower_j' and '(i/v)c_upper_j', with dimension lowered by 1 compared to i_j, v_j.
Example: to get Ic and Irt from a current-bias 4-point measurement,
set onWhat = "v" and critVal = 5e-6 (any value that reliably cuts off random noise).
"""

_onWhat: Literal["i", "v", "di_dv", "dv_di"]
_critVal: float
_numderHelper: SavgolNumericalDerivative | None

def __init__(self, onWhat: Literal["i", "v", "di_dv", "dv_di"], critVal: float, numderHelper: SavgolNumericalDerivative | None = None):
super().__init__()
self._onWhat = onWhat
self._critVal = critVal
self._numderHelper = numderHelper
if onWhat in ["di_dv", "dv_di"]:
assert isinstance(numderHelper, SavgolNumericalDerivative), "When using crit detection on a derivative of the data, a SavgolNumericalDerivative needs to be passed as well"

@override
def perform_analysis(self, data: tuple['MeasurementTypeAdapter.GeneratedData', ...]) -> tuple['MeasurementTypeAdapter.GeneratedData', ...]:
parent_schema = tuple([element.descriptor for element in data])
output_schema = self.expected_structure(parent_schema)
out = []
flipBiasMeas = (parent_schema[0].name == "i_0") ^ (self._onWhat in ["v", "dv_di"])
if self._onWhat in ["di_dv", "dv_di"]:
data = self._numderHelper.perform_analysis(data)
for ((upper, lower), (first, second)) in zip(itertools.batched(output_schema, 2), itertools.batched(data, 2)):
# order each pair as (bias, thresholded quantity), swapping when the schema order does not match onWhat
bias, meas = (second, first) if flipBiasMeas else (first, second)
upper_idxs, lower_idxs = CritDetectionIV._crit_find_thresh(bias.data, np.abs(meas.data), self._critVal)
out.append(upper.with_data(np.take_along_axis(bias.data, upper_idxs[..., np.newaxis], axis=-1)[..., 0]))
out.append(lower.with_data(np.take_along_axis(bias.data, lower_idxs[..., np.newaxis], axis=-1)[..., 0]))
return tuple(out)

@staticmethod
def _crit_find_thresh(x_vals: np.ndarray, y_vals: np.ndarray, thresh: float = 1e-6) -> tuple[np.ndarray, np.ndarray]:
"""
Helper function for the main threshold detection on y_vals; x_vals is needed for sanity checking.
data should be flipped & mirrored to ideally look like
^ x_vals (.), y_vals (x), crits (o)
| .x
| . x
| . x
| .
0 +------oxxxxxxxxxxxxxxxo-----> #idx
| x .
| x .
| x.
| .x
v
"""
# thresh detect
upper_idxs = np.argmax(np.logical_and(y_vals > thresh, x_vals > 0), axis=-1)
upper_idxs = np.where(np.any(np.logical_and(y_vals > thresh, x_vals > 0), axis=-1), upper_idxs, x_vals.shape[-1] - 1) # default to max index if no threshold crossing is found
lower_idxs = y_vals.shape[-1] - 1 - np.argmax(np.flip(np.logical_and(y_vals > thresh, x_vals < 0), axis=-1), axis=-1) # flip because argmax returns the first occurrence
lower_idxs = np.where(np.any(np.logical_and(y_vals > thresh, x_vals < 0), axis=-1), lower_idxs, 0)
return upper_idxs, lower_idxs


@override
def expected_structure(self, parent_schema: tuple['MeasurementTypeAdapter.DataDescriptor', ...]) -> tuple['MeasurementTypeAdapter.DataDescriptor', ...]:
structure = []
for i, bias in enumerate(parent_schema[::2]):
structure += [
MeasurementTypeAdapter.DataDescriptor(
name=f"ic_upper_{i}" if self._onWhat in ["v", "dv_di"] else f"vc_upper_{i}",
unit=f"{bias.unit}",
axes=bias.axes[:-1],
category="analysis"
),
MeasurementTypeAdapter.DataDescriptor(
name=f"ic_lower_{i}" if self._onWhat in ["v", "dv_di"] else f"vc_lower_{i}",
unit=f"{bias.unit}",
axes=bias.axes[:-1],
category="analysis"
),
]
return tuple(structure)

@override
def default_views(self, parent_schema: tuple['MeasurementTypeAdapter.DataDescriptor', ...]) -> dict[str, DataView]:
return {}
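A usage sketch based on the class docstring (constructor calls only; how the adapter is attached to a measurement is outside this diff, and the derivative-case critVal below is an arbitrary placeholder):

from qkit.analysis.crit_detection_iv import CritDetectionIV
from qkit.analysis.numerical_derivative import SavgolNumericalDerivative

# critical/retrapping current from a current-biased 4-point measurement:
# threshold on the measured voltage, cutoff chosen above the noise floor
crit_ic = CritDetectionIV(onWhat="v", critVal=5e-6)

# thresholding on a numerical derivative requires a SavgolNumericalDerivative,
# which is used to compute dv_di before the threshold detection
crit_dvdi = CritDetectionIV(
    onWhat="dv_di",
    critVal=1e3,  # placeholder, tune to the measured transition
    numderHelper=SavgolNumericalDerivative(window_length=21, polyorder=3),
)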
54 changes: 30 additions & 24 deletions src/qkit/analysis/numerical_derivative.py
@@ -1,5 +1,5 @@
import itertools
from typing import override
from typing import override, Any

from scipy import signal

@@ -16,15 +16,15 @@ class SavgolNumericalDerivative(AnalysisTypeAdapter):
Assumes that names are in the form of '[IV]_(?:b_)?_[0-9]'
"""

_window_length: int
_polyorder: int
_derivative: int
savgol_kwargs: dict[str, Any]

def __init__(self, window_length: int = 15, polyorder: int = 3, derivative: int = 1):
def __init__(self, **savgol_kwargs):
"""
savgol_kwargs:
kwargs to pass to scipy.signal.savgol_filter alongside data, refer to scipy docs for more information
"""
super().__init__()
self._window_length = window_length
self._polyorder = polyorder
self._derivative = derivative
self.savgol_kwargs = {"window_length": 15, "polyorder": 3, "deriv": 1} | savgol_kwargs

@override
def perform_analysis(self, data: tuple['MeasurementTypeAdapter.GeneratedData', ...]) -> tuple[
@@ -33,14 +33,8 @@ def perform_analysis(self, data: tuple['MeasurementTypeAdapter.GeneratedData', .
output_schema = self.expected_structure(parent_schema)
out = []
for ((dxdy, dydx), (x, y)) in zip(itertools.batched(output_schema, 2), itertools.batched(data, 2)):
out.append(dxdy.with_data(
signal.savgol_filter(x.data, window_length=self._window_length, polyorder=self._polyorder, deriv=self._derivative)\
/ signal.savgol_filter(y.data, window_length=self._window_length, polyorder=self._polyorder, deriv=self._derivative)
))
out.append(dydx.with_data(
signal.savgol_filter(y.data, window_length=self._window_length, polyorder=self._polyorder, deriv=self._derivative)\
/ signal.savgol_filter(x.data, window_length=self._window_length, polyorder=self._polyorder, deriv=self._derivative)
))
out.append(dxdy.with_data(signal.savgol_filter(x.data, **self.savgol_kwargs)/signal.savgol_filter(y.data, **self.savgol_kwargs)))
out.append(dydx.with_data(signal.savgol_filter(y.data, **self.savgol_kwargs)/signal.savgol_filter(x.data, **self.savgol_kwargs)))
return tuple(out)
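In short, the reworked constructor applies defaults of window_length=15, polyorder=3, deriv=1 and lets callers override them or pass any extra scipy.signal.savgol_filter option, which perform_analysis then forwards verbatim. A brief sketch (instance names are illustrative):

from qkit.analysis.numerical_derivative import SavgolNumericalDerivative

# defaults only: window_length=15, polyorder=3, deriv=1
deriv_default = SavgolNumericalDerivative()

# override the window and forward an extra scipy option (boundary handling) unchanged
deriv_custom = SavgolNumericalDerivative(window_length=31, mode="nearest")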


@@ -52,13 +46,15 @@ def expected_structure(self, parent_schema: tuple['MeasurementTypeAdapter.DataDe
structure += [
MeasurementTypeAdapter.DataDescriptor(
name=f"d{x.name}_d{y.name}",
unit=f"{x.unit}_{y.unit}",
axes=x.axes
unit=f"{x.unit}/{y.unit}",
axes=x.axes,
category="analysis"
),
MeasurementTypeAdapter.DataDescriptor(
name=f"d{y.name}_d{x.name}",
unit=f"{y.unit}_{x.unit}",
axes=x.axes
unit=f"{y.unit}/{x.unit}",
axes=x.axes,
category="analysis"
)
]
return tuple(structure)
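For a parent schema pair (i_0 in A, v_0 in V), for example, the updated naming yields the descriptors di_0_dv_0 with unit A/V and dv_0_di_0 with unit V/A, both tagged with category="analysis".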
@@ -67,16 +63,26 @@ def expected_structure(self, parent_schema: tuple['MeasurementTypeAdapter.DataDe
def default_views(self, parent_schema: tuple['MeasurementTypeAdapter.DataDescriptor', ...]) -> dict[str, DataView]:
schema = self.expected_structure(parent_schema)
variable_names = (schema[0].name.split('_')[0], schema[1].name.split('_')[0])
return {
f'd{variable_names[0]}_d{variable_names[1]}': DataView(
return { # dx/dy
f'{variable_names[0]}_{variable_names[1]}': DataView(
view_params={
"labels": (schema[0].axes[0].name, f'{variable_names[0]}_{variable_names[1]}'),
'plot_style': 1,
'markersize': 5
},
view_sets=[
DataViewSet(
x_path=DataReference(entry.axes[0].name,),
y_path=DataReference(entry.name, category='analysis')
) for entry in schema[0::2]
]
),
f'd{variable_names[1]}_d{variable_names[0]}': DataView(
), # dy/dx
f'{variable_names[1]}_{variable_names[0]}': DataView(
view_params={
"labels": (schema[1].axes[0].name, f'{variable_names[1]}_{variable_names[0]}'),
'plot_style': 1,
'markersize': 5
},
view_sets=[
DataViewSet(
x_path=DataReference(entry.axes[0].name),