diff --git a/.gitignore b/.gitignore index f2e4e1b..3661626 100644 --- a/.gitignore +++ b/.gitignore @@ -107,3 +107,5 @@ venv.bak/ # VSCOde .vs* +flap_mdsplus/ +flap_nstx/ diff --git a/flap/__init__.py b/flap/__init__.py index b9e9128..4546166 100644 --- a/flap/__init__.py +++ b/flap/__init__.py @@ -1,5 +1,7 @@ global VERBOSE VERBOSE = True +if (VERBOSE): + print("Importing flap") from .tools import * from .coordinate import * from .data_object import * diff --git a/flap/data_object.py b/flap/data_object.py index cfc7103..5aaa15b 100644 --- a/flap/data_object.py +++ b/flap/data_object.py @@ -677,7 +677,7 @@ def _plot_error_ranges(self, index=None): else: return self.error[_index].flatten() - def _plot_coord_ranges(self,coord, c_data, c_low, c_high): + def _plot_coord_ranges(self, coord, c_data, c_low, c_high): """ Helper function to return error low and high limits from coordiniate data in the format needed by matplotlib """ @@ -1401,7 +1401,7 @@ def slice_data(self,slicing=None,summing=None,options=None): Dictionary with keys referring to coordinates and values as processing strings. If the processed coordinate changes along multiple dimensions those dimensions will be flattened. - For mean and avereage data errors are calculated as error of independent variables, that is taking the square + For mean and average data errors are calculated as error of independent variables, that is taking the square root of the squared sum of errors. For coordinates the mean of the ranges is taken. 
Processing strings are the following: @@ -1478,7 +1478,7 @@ def check_multi_slice(slicing, coord_dim): return False else: return True - raise TypeError("Invalid slicing description.") + raise TypeError("Invalid slicing description, its type is: "+str(type(slicing))) def simple_slice_index(slicing, slicing_coord, data_shape, _options): """ Returns index array which can be used for indexing the selected elements in the @@ -1513,8 +1513,8 @@ def simple_slice_index(slicing, slicing_coord, data_shape, _options): coord_obj.step[0]) if ((type(slicing) is flap.coordinate.Intervals) # Regular slice is possible only with a single interval - and ((slicing.step is None) and (len(slicing.start) == 1) or - (slicing.step is not None) and (slicing.number == 1))): + and ((slicing.step is None) and (len(slicing.start) == 1) or + (slicing.step is not None) and (slicing.number == 1))): if (slicing_coord.step[0] > 0): regular_slice = slice(slicing.start[0], slicing.stop[0], slicing_coord.step[0]) else: @@ -1599,7 +1599,7 @@ def simple_slice_index(slicing, slicing_coord, data_shape, _options): start_index = int(round((regular_slice.start - range_coord[0]) / abs(slicing_coord.step[0]))) step_index = int(round(regular_slice.step / abs(slicing_coord.step[0]))) stop_index = int((regular_slice.stop - range_coord[0]) / abs(slicing_coord.step[0])) - return slice(start_index, stop_index, step_index) + return slice(start_index, stop_index+1, step_index) else: # slice object could not be created for data array # Creating flattened coordinate data array @@ -3709,7 +3709,7 @@ def get_data_object(self,name,exp_id='*'): nlist.append(n) if len(nlist) == 0: raise KeyError("Data object " + name - + "(exp_id:" + str(exp_id) + ") does not exists.") + + "(exp_id:" + str(exp_id) + ") does not exist.") if (len(nlist) > 1): raise KeyError("Multiple data objects found for name " + name + "(exp_id:" + str(exp_id) + ").") @@ -3728,7 +3728,7 @@ def get_data_object_ref(self,name,exp_id='*'): nlist.append(n) if 
len(nlist) == 0: raise KeyError("Data object " + name - + "(exp_id:" + str(exp_id) + ") does not exists.") + + "(exp_id:" + str(exp_id) + ") does not exist.") if (len(nlist) > 1): raise KeyError("Multiple data objects found for name " + name + "(exp_id:" + str(exp_id) + ").") diff --git a/flap/plot.py b/flap/plot.py index b7eb700..999bd7d 100644 --- a/flap/plot.py +++ b/flap/plot.py @@ -27,19 +27,21 @@ - Constants are considered as unit-less, therefore they don't need forcing. """ +import os +import copy +from enum import Enum +import math +import time import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib.colors as colors import matplotlib.animation as animation +import matplotlib.lines as mlines from matplotlib.widgets import Button, Slider from matplotlib import ticker import numpy as np -import copy -from enum import Enum -import math -import time try: import cv2 @@ -134,13 +136,14 @@ def axes_to_pdd_list(d,axes): class PlotAnimation: - def __init__(self, ax_list, axes, d, xdata, ydata, tdata, xdata_range, ydata_range, + def __init__(self, ax_list, axes, axes_unit_conversion, d, xdata, ydata, tdata, xdata_range, ydata_range, cmap_obj, contour_levels, coord_t, coord_x, - coord_y, cmap, options, xrange, yrange, + coord_y, cmap, options, overplot_options, xrange, yrange, zrange, image_like, plot_options, language, plot_id, gs): self.ax_list = ax_list - self.axes = axes + self.axes = axes + self.axes_unit_conversion=axes_unit_conversion self.contour_levels = contour_levels self.cmap = cmap self.cmap_obj = cmap_obj @@ -154,6 +157,7 @@ def __init__(self, ax_list, axes, d, xdata, ydata, tdata, xdata_range, ydata_ran self.image_like = image_like self.language = language self.options = options + self.overplot_options = overplot_options self.pause = False self.plot_id = plot_id self.plot_options = plot_options @@ -162,9 +166,16 @@ def __init__(self, ax_list, axes, d, xdata, ydata, tdata, xdata_range, ydata_ran self.tdata = tdata 
self.xdata = xdata self.ydata = ydata - - self.xdata_range = xdata_range - self.ydata_range = ydata_range + if xdata_range is not None: + self.xdata_range = [self.axes_unit_conversion[0] * xdata_range[0], + self.axes_unit_conversion[0] * xdata_range[1]] + else: + self.xdata_range=None + if ydata_range is not None: + self.ydata_range = [self.axes_unit_conversion[1] * ydata_range[0], + self.axes_unit_conversion[1] * ydata_range[1]] + else: + self.ydata_range=None self.xrange = xrange self.yrange = yrange @@ -176,26 +187,6 @@ def __init__(self, ax_list, axes, d, xdata, ydata, tdata, xdata_range, ydata_ran def animate(self): - #These lines do the coordinate unit conversion - self.axes_unit_conversion=np.zeros(len(self.axes)) - self.axes_unit_conversion[:]=1. - - if self.options['Plot units'] is not None: - unit_length=len(self.options['Plot units']) - if unit_length > 3: - raise ValueError('Only three units are allowed for the three coordinates.') - unit_conversion_coeff={} - for plot_unit_name in self.options['Plot units']: - for index_data_unit in range(len(self.d.coordinates)): - if (plot_unit_name == self.d.coordinates[index_data_unit].unit.name): - data_coordinate_unit=self.d.coordinates[index_data_unit].unit.unit - plot_coordinate_unit=self.options['Plot units'][plot_unit_name] - unit_conversion_coeff[plot_unit_name]=flap.tools.unit_conversion(original_unit=data_coordinate_unit, - new_unit=plot_coordinate_unit) - for index_axes in range(len(self.axes)): - if self.axes[index_axes] in self.options['Plot units']: - self.axes_unit_conversion[index_axes]=unit_conversion_coeff[self.axes[index_axes]] - pause_ax = plt.figure(self.plot_id.figure).add_axes((0.78, 0.94, 0.1, 0.04)) self.pause_button = Button(pause_ax, 'Pause', hovercolor='0.975') self.pause_button.on_clicked(self._pause_animation) @@ -223,26 +214,29 @@ def animate(self): valmax=self.tdata[-1]*self.axes_unit_conversion[2], valinit=self.tdata[0]*self.axes_unit_conversion[2]) 
self.time_slider.on_changed(self._set_animation) - - plt.subplot(self.plot_id.base_subplot) + + #The following line needed to be removed for matplotlib 3.4.1 + #plt.subplot(self.plot_id.base_subplot) self.ax_act = plt.subplot(self.gs[0,0]) - + if (len(self.coord_x.dimension_list) == 3 or + len(self.coord_y.dimension_list) == 3): + self.ax_act.set_autoscale_on(False) + + #The following lines set the axes to be equal if the units of the axes-to-be-plotted are the same - - axes_coordinate_decrypt=[0] * len(self.axes) - for i_axes in range(len(self.axes)): - for j_coordinate in range(len(self.d.coordinates)): - if (self.d.coordinates[j_coordinate].unit.name == self.axes[i_axes]): - axes_coordinate_decrypt[i_axes]=j_coordinate - for i_check in range(len(self.axes)) : - for j_check in range(i_check+1,len(self.axes)): - if (self.d.coordinates[axes_coordinate_decrypt[i_check]].unit.unit == - self.d.coordinates[axes_coordinate_decrypt[j_check]].unit.unit): - self.ax_act.axis('equal') + if self.options['Equal axes']: + axes_coordinate_decrypt=[0] * len(self.axes) + for i_axes in range(len(self.axes)): + for j_coordinate in range(len(self.d.coordinates)): + if (self.d.coordinates[j_coordinate].unit.name == self.axes[i_axes]): + axes_coordinate_decrypt[i_axes]=j_coordinate + for i_check in range(len(self.axes)) : + for j_check in range(i_check+1,len(self.axes)): + if (self.d.coordinates[axes_coordinate_decrypt[i_check]].unit.unit == + self.d.coordinates[axes_coordinate_decrypt[j_check]].unit.unit): + self.ax_act.set_aspect(1.0) - - time_index = [slice(0,dim) for dim in self.d.data.shape] time_index[self.coord_t.dimension_list[0]] = 0 time_index = tuple(time_index) @@ -255,13 +249,6 @@ def animate(self): np.nanmax(self.d.data)] self.vmin = self.zrange[0] self.vmax = self.zrange[1] - -# if (self.zrange is None): -# self.vmin = np.nanmin(self.d.data[time_index]) -# self.vmax = np.nanmax(self.d.data[time_index]) -# else: -# self.vmin = self.zrange[0] -# self.vmax = self.zrange[1] 
if (self.vmax <= self.vmin): @@ -272,6 +259,7 @@ def animate(self): raise ValueError("z range[0] cannot be negative or zero for logarithmic scale.") self.norm = colors.LogNorm(vmin=self.vmin, vmax=self.vmax) self.locator = ticker.LogLocator(subs='all') + ticker.LogLocator(subs='all') else: self.norm = None self.locator = None @@ -281,8 +269,6 @@ def animate(self): if (self.image_like): try: - #There is a problem here, but I cant find it. Image is rotated with 90degree here, but not in anim-image. - #if (self.coord_x.dimension_list[0] == 0): if (self.coord_x.dimension_list[0] < self.coord_y.dimension_list[0]): im = np.clip(np.transpose(self.d.data[time_index]),self.vmin,self.vmax) else: @@ -294,9 +280,9 @@ def animate(self): except Exception as e: raise e else: - if (len(self.xdata.shape) == 3 and len(self.xdata.shape) == 3): - #xgrid, ygrid = flap.tools.grid_to_box(self.xdata[0,:,:],self.ydata[0,:,:]) #Same issue, time is not necessarily the first flap.coordinate. - xgrid, ygrid = flap.tools.grid_to_box(self.xdata[0,:,:]*self.axes_unit_conversion[0],self.ydata[0,:,:]*self.axes_unit_conversion[1]) #Same issue, time is not necessarily the first flap.coordinate. 
+ if (len(self.xdata.shape) == 3 and len(self.ydata.shape) == 3): + xgrid, ygrid = flap.tools.grid_to_box(self.xdata[time_index]*self.axes_unit_conversion[0], + self.ydata[time_index]*self.axes_unit_conversion[1]) else: xgrid, ygrid = flap.tools.grid_to_box(self.xdata*self.axes_unit_conversion[0], self.ydata*self.axes_unit_conversion[1]) @@ -311,204 +297,37 @@ def animate(self): self.xdata_range=None self.ydata_range=None + if (self.xrange is None): + self.xrange=[np.min(self.xdata),np.max(self.xdata)] + + plt.xlim(self.xrange[0]*self.axes_unit_conversion[0], + self.xrange[1]*self.axes_unit_conversion[0]) + + if (self.yrange is None): + self.yrange=[np.min(self.ydata),np.max(self.ydata)] + + plt.ylim(self.yrange[0]*self.axes_unit_conversion[1], + self.yrange[1]*self.axes_unit_conversion[1]) if (self.options['Colorbar']): cbar = plt.colorbar(img,ax=self.ax_act) cbar.set_label(self.d.data_unit.name) #EFIT overplot feature implementation: #It needs to be more generalied in the future as the coordinates are not necessarily in this order: [time_index,spat_index] - #This needs to be cross-checked with the time array's dimensions wherever there is a call for a certain index. 
- if ('EFIT options' in self.options and self.options['EFIT options'] is not None): - - default_efit_options={'Plot limiter': None, - 'Limiter X': None, - 'Limiter Y': None, - 'Limiter 2D': None, - 'Limiter color': 'white', - 'Plot separatrix': None, - 'Separatrix X': None, - 'Separatrix Y': None, - 'Separatrix 2D': None, - 'Separatrix color': 'red', - 'Plot flux': None, - 'Flux XY': None, - 'Flux nlevel': 51} - - self.efit_options=flap.config.merge_options(default_efit_options,self.options['EFIT options'],data_source=self.d.data_source) - self.efit_data={'limiter': {'Time':[],'Data':[]}, - 'separatrix':{'Time':[],'Data':[]}, - 'flux': {'Time':[],'Data':[]}} - - for setting in ['limiter','separatrix']: - if (self.efit_options['Plot '+setting]): - if (((self.efit_options[setting.capitalize()+' X']) and - (self.efit_options[setting.capitalize()+' Y' ])) or - (self.efit_options[setting.capitalize()+' 2D'])): - - if ((self.efit_options[setting.capitalize()+' X']) and - (self.efit_options[setting.capitalize()+' Y'])): - try: - R_object=flap.get_data_object(self.efit_options[setting.capitalize()+' X'],exp_id=self.d.exp_id) - except: - raise ValueError("The objects "+self.efit_options[setting.capitalize()+' X']+" cannot be read.") - try: - Z_object=flap.get_data_object(self.efit_options[setting.capitalize()+' Y'],exp_id=self.d.exp_id) - except: - raise ValueError("The objects "+self.efit_options[setting.capitalize()+' Y']+" cannot be read.") - if (len(R_object.data.shape) != 2 or len(Z_object.data.shape) != 2): - raise ValueError("The "+setting.capitalize()+' Y'+" data is not 1D. 
Use 2D or modify data reading.") - self.efit_data[setting]['Data']=np.asarray([R_object.data,Z_object.data]) - self.efit_data[setting]['Time']=R_object.coordinate('Time')[0][:,0] #TIME IS NOT ALWAYS THE FIRST COORDINATE, ELLIPSIS CHANGE SHOULD BE IMPLEMENTED - elif (self.efit_options[setting.capitalize()+' XY']): - try: - R_object=flap.get_data_object(self.efit_options[setting.capitalize()+' 2D'],exp_id=self.d.exp_id) - except: - raise ValueError(setting.capitalize()+' 2D data is not available. FLAP data object needs to be read first.') - if R_object.data.shape[2] == 2: - self.efit_data[setting]['Data']=np.asarray([R_object.data[:,:,0],R_object.data[:,:,1]]) - else: - raise ValueError(setting.capitalize()+' XY data needs to be in the format [n_time,n_points,2 (xy)') - self.efit_data[setting]['Time']=R_object.coordinate('Time')[0][:,0,0] #TIME IS NOT ALWAYS THE FIRST COORDINATE, ELLIPSIS CHANGE SHOULD BE IMPLEMENTED - else: - raise ValueError('Both '+setting.capitalize()+' X and'+ - setting.capitalize()+' Y or '+ - setting.capitalize()+' 2D need to be set.') - - for index_coordinate in range(len(self.d.coordinates)): - if ((self.d.coordinates[index_coordinate].unit.name in self.axes) and - (self.d.coordinates[index_coordinate].unit.name != 'Time')): - coordinate_index=index_coordinate - #Spatial unit translation (mm vs m usually) - if (R_object.data_unit.unit != self.d.coordinates[coordinate_index].unit.unit): - try: - coeff_efit_spatial=flap.tools.spatial_unit_translation(R_object.data_unit.unit) - except: - raise ValueError("Time unit translation cannot be made. Check the time unit of the object.") - try: - coeff_data_spatial=flap.tools.spatial_unit_translation(self.d.coordinates[coordinate_index].unit.unit) - except: - raise ValueError("Spatial unit translation cannot be made. 
Check the time unit of the object.") - #print('Spatial unit translation factor: '+str(coeff_efit_spatial/coeff_data_spatial)) - self.efit_data[setting]['Data'] *= coeff_efit_spatial/coeff_data_spatial - #Time translation (usually ms vs s) - for index_time in range(len(self.d.coordinates)): - if (self.d.coordinates[index_time].unit.name == 'Time'): - time_index_data=index_time - - for index_time in range(len(R_object.coordinates)): - if (self.d.coordinates[index_time].unit.name == 'Time'): - time_index_efit=index_time - - if (R_object.coordinates[time_index_efit].unit.unit != self.d.coordinates[time_index_data].unit.unit): - try: - coeff_efit_time=flap.tools.time_unit_translation(R_object.coordinates[time_index_efit].unit.unit) - except: - raise ValueError("Time unit translation cannot be made. Check the time unit of the object.") - try: - coeff_data_time=flap.tools.time_unit_translation(self.d.coordinates[time_index_data].unit.unit) - except: - raise ValueError("Time unit translation cannot be made. Check the time unit of the object.") - self.efit_data[setting]['Time'] *= coeff_efit_time/coeff_data_time - else: - raise ValueError(setting.capitalize()+' keywords are not set for the data objects.') - - #Interpolating EFIT data for the time vector of the data - if ((self.efit_data[setting]['Data'] != []) and (self.efit_data[setting]['Time'] != [])): - self.efit_data[setting]['Data resampled']=np.zeros([2,self.tdata.shape[0],self.efit_data[setting]['Data'].shape[2]]) - for index_xy in range(0,2): - for index_coordinate in range(0,self.efit_data[setting]['Data'].shape[2]): - self.efit_data[setting]['Data resampled'][index_xy,:,index_coordinate]=np.interp(self.tdata,self.efit_data[setting]['Time'], - self.efit_data[setting]['Data'][index_xy,:,index_coordinate]) - - if (self.efit_options['Plot flux']): - try: - flux_object=flap.get_data_object(self.efit_options['Flux XY'],exp_id=self.d.exp_id) - except: - raise ValueError('Flux XY data is not available. 
FLAP data object needs to be read first.') - if len(flux_object.data.shape) != 3: - raise ValueError('Flux XY data needs to be a 3D matrix (r,z,t), not necessarily in this order.') - if (flux_object.coordinates[0].unit.name != 'Time'): - raise ValueError('Time should be the first coordinate in the flux data object.') - self.efit_data['flux']['Data']=flux_object.data - self.efit_data['flux']['Time']=flux_object.coordinate('Time')[0][:,0,0] #TIME IS NOT ALWAYS THE FIRST COORDINATE, ELLIPSIS CHANGE SHOULD BE IMPLEMENTED - self.efit_data['flux']['X coord']=flux_object.coordinate(flux_object.coordinates[1].unit.name)[0] - self.efit_data['flux']['Y coord']=flux_object.coordinate(flux_object.coordinates[2].unit.name)[0] - - for index_coordinate in range(len(self.d.coordinates)): - if ((self.d.coordinates[index_coordinate].unit.name in self.axes) and - (self.d.coordinates[index_coordinate].unit.name != 'Time')): - coordinate_index=index_coordinate - #Spatial unit translation (mm vs m usually) - if (flux_object.data_unit.unit != self.d.coordinates[coordinate_index].unit.unit): - try: - coeff_efit_spatial=flap.tools.spatial_unit_translation(flux_object.data_unit.unit) - except: - raise ValueError("Time unit translation cannot be made. Check the time unit of the object.") - try: - coeff_data_spatial=flap.tools.spatial_unit_translation(self.d.coordinates[coordinate_index].unit.unit) - except: - raise ValueError("Spatial unit translation cannot be made. 
Check the time unit of the object.") - #print('Spatial unit translation factor: '+str(coeff_efit_spatial/coeff_data_spatial)) - self.efit_data['flux']['X coord'] *= coeff_efit_spatial/coeff_data_spatial - self.efit_data['flux']['Y coord'] *= coeff_efit_spatial/coeff_data_spatial - - #Time translation (usually ms vs s) - for index_time in range(len(self.d.coordinates)): - if (self.d.coordinates[index_time].unit.name == 'Time'): - time_index_data=index_time - - for index_time in range(len(flux_object.coordinates)): - if (flux_object.coordinates[index_time].unit.name == 'Time'): - time_index_efit=index_time - - if (flux_object.coordinates[time_index_efit].unit.unit != self.d.coordinates[time_index_data].unit.unit): - try: - coeff_efit_time=flap.tools.time_unit_translation(flux_object.coordinates[time_index_efit].unit.unit) - except: - raise ValueError("Time unit translation cannot be made. Check the time unit of the object.") - try: - coeff_data_time=flap.tools.time_unit_translation(self.d.coordinates[time_index_data].unit.unit) - except: - raise ValueError("Time unit translation cannot be made. 
Check the time unit of the object.") - self.efit_data['flux']['Time'] *= coeff_efit_time/coeff_data_time - - #Interpolating EFIT data for the time vector of the data - if ((self.efit_data['flux']['Data'] != []) and (self.efit_data['flux']['Time'] != [])): - self.efit_data['flux']['Data resampled']=np.zeros([self.tdata.shape[0], - self.efit_data['flux']['Data'].shape[1], - self.efit_data['flux']['Data'].shape[2]]) - self.efit_data['flux']['X coord resampled']=np.zeros([self.tdata.shape[0], - self.efit_data['flux']['X coord'].shape[1], - self.efit_data['flux']['X coord'].shape[2]]) - self.efit_data['flux']['Y coord resampled']=np.zeros([self.tdata.shape[0], - self.efit_data['flux']['Y coord'].shape[1], - self.efit_data['flux']['Y coord'].shape[2]]) - - for index_x in range(0,self.efit_data['flux']['Data'].shape[1]): - for index_y in range(0,self.efit_data['flux']['Data'].shape[2]): - self.efit_data['flux']['Data resampled'][:,index_x,index_y]=np.interp(self.tdata,self.efit_data['flux']['Time'], - self.efit_data['flux']['Data'][:,index_x,index_y]) - self.efit_data['flux']['X coord resampled'][:,index_x,index_y]=np.interp(self.tdata,self.efit_data['flux']['Time'], - self.efit_data['flux']['X coord'][:,index_x,index_y]) - self.efit_data['flux']['Y coord resampled'][:,index_x,index_y]=np.interp(self.tdata,self.efit_data['flux']['Time'], - self.efit_data['flux']['Y coord'][:,index_x,index_y]) - - - if (self.xrange is not None): - plt.xlim(self.xrange[0]*self.axes_unit_conversion[0],self.xrange[1]*self.axes_unit_conversion[0]) - - if (self.yrange is not None): - plt.ylim(self.yrange[0]*self.axes_unit_conversion[1],self.yrange[1]*self.axes_unit_conversion[1]) - + #This needs to be cross-checked with the time array's dimensions wherever there is a call for a certain index. 
+ + if self.axes_unit_conversion[0] == 1.: plt.xlabel(self.ax_list[0].title(language=self.language)) else: - plt.xlabel(self.ax_list[0].title(language=self.language, new_unit=self.options['Plot units'][self.axes[0]])) + plt.xlabel(self.ax_list[0].title(language=self.language, + new_unit=self.options['Plot units'][self.axes[0]])) if self.axes_unit_conversion[1] == 1.: plt.ylabel(self.ax_list[1].title(language=self.language)) else: - plt.ylabel(self.ax_list[1].title(language=self.language, new_unit=self.options['Plot units'][self.axes[1]])) + plt.ylabel(self.ax_list[1].title(language=self.language, + new_unit=self.options['Plot units'][self.axes[1]])) if (self.options['Log x']): plt.xscale('log') @@ -525,7 +344,7 @@ def animate(self): else: time_unit=self.coord_t.unit.unit time_coeff=1. - title = str(self.d.exp_id)+' @ '+self.coord_t.unit.name+'='+"{:10.5f}".format(self.tdata[0]*time_coeff)+\ + title = str(self.d.exp_id)+' @ '+self.coord_t.unit.name+'='+"{:10.7f}".format(self.tdata[0]*time_coeff)+\ ' ['+time_unit+']' plt.title(title) @@ -546,13 +365,12 @@ def animate_plot(self, it): self.time_slider.eventson = True self.current_frame = it - + plot_opt = copy.deepcopy(self.plot_options[0]) self.ax_act.clear() if (self.image_like): try: -# if (self.coord_x.dimension_list[0] == 0): if (self.coord_x.dimension_list[0] < self.coord_y.dimension_list[0]): im = np.clip(np.transpose(self.d.data[time_index]),self.vmin,self.vmax) else: @@ -561,15 +379,22 @@ def animate_plot(self, it): cmap=self.cmap_obj,vmin=self.vmin, aspect=self.options['Aspect ratio'], interpolation=self.options['Interpolation'], - vmax=self.vmax,origin='lower',**plot_opt) + vmax=self.vmax,origin='lower',**plot_opt) del im except Exception as e: raise e else: + self.ax_act.set_autoscale_on(False) + self.ax_act.set_xlim(self.xrange[0]*self.axes_unit_conversion[0], + self.xrange[1]*self.axes_unit_conversion[0]) + self.ax_act.set_ylim(self.yrange[0]*self.axes_unit_conversion[1], + 
self.yrange[1]*self.axes_unit_conversion[1]) if (len(self.xdata.shape) == 3 and len(self.ydata.shape) == 3): - xgrid, ygrid = flap.tools.grid_to_box(self.xdata[time_index,:,:]*self.axes_unit_conversion[0],self.ydata[time_index,:,:]*self.axes_unit_conversion[1]) #Same issue, time is not necessarily the first flap.coordinate. + xgrid, ygrid = flap.tools.grid_to_box(self.xdata[time_index]*self.axes_unit_conversion[0], + self.ydata[time_index]*self.axes_unit_conversion[1]) #Same issue, time is not necessarily the first flap.coordinate. else: - xgrid, ygrid = flap.tools.grid_to_box(self.xdata*self.axes_unit_conversion[0],self.ydata*self.axes_unit_conversion[1]) + xgrid, ygrid = flap.tools.grid_to_box(self.xdata*self.axes_unit_conversion[0], + self.ydata*self.axes_unit_conversion[1]) im = np.clip(np.transpose(self.d.data[time_index]),self.vmin,self.vmax) try: plt.pcolormesh(xgrid,ygrid,im,norm=self.norm,cmap=self.cmap,vmin=self.vmin, @@ -577,31 +402,63 @@ def animate_plot(self, it): except Exception as e: raise e del im - if ('EFIT options' in self.options and self.options['EFIT options'] is not None): - for setting in ['limiter','separatrix']: - if (self.efit_options['Plot '+setting]): - self.ax_act.set_autoscale_on(False) - im = plt.plot(self.efit_data[setting]['Data resampled'][0,it,:], - self.efit_data[setting]['Data resampled'][1,it,:], - color=self.efit_options[setting.capitalize()+' color']) - - if (self.efit_options['Plot flux']): - self.ax_act.set_autoscale_on(False) - #im = plt.contour(self.efit_data['flux']['X coord'][it,:,:], - # self.efit_data['flux']['Y coord'][it,:,:], - # self.efit_data['flux']['Data resampled'][it,:,:], - # levels=self.efit_options['Flux nlevel']) - - im = plt.contour(self.efit_data['flux']['X coord resampled'][it,:,:].transpose(), - self.efit_data['flux']['Y coord resampled'][it,:,:].transpose(), - self.efit_data['flux']['Data resampled'][it,:,:], - levels=self.efit_options['Flux nlevel']) - - if (self.xrange is not None): - 
self.ax_act.set_xlim(self.xrange[0],self.xrange[1]) - if (self.yrange is not None): - self.ax_act.set_ylim(self.yrange[0],self.yrange[1]) - + + if (self.overplot_options is not None): + for path_obj_keys in self.overplot_options['path']: + if self.overplot_options['path'][path_obj_keys]['Plot']: + im = plt.plot(self.overplot_options['path'][path_obj_keys]['data']['Data resampled'][0,it,:]*self.axes_unit_conversion[0], + self.overplot_options['path'][path_obj_keys]['data']['Data resampled'][1,it,:]*self.axes_unit_conversion[1], + color=self.overplot_options['path'][path_obj_keys]['Color']) + + for contour_obj_keys in self.overplot_options['contour']: + if self.overplot_options['contour'][contour_obj_keys]['Plot']: + im = plt.contour(self.overplot_options['contour'][contour_obj_keys]['data']['X coord resampled'][it,:,:].transpose()*self.axes_unit_conversion[0], + self.overplot_options['contour'][contour_obj_keys]['data']['Y coord resampled'][it,:,:].transpose()*self.axes_unit_conversion[1], + self.overplot_options['contour'][contour_obj_keys]['data']['Data resampled'][it,:,:], + levels=self.overplot_options['contour'][contour_obj_keys]['nlevel'], + cmap=self.overplot_options['contour'][contour_obj_keys]['Colormap']) + + for contour_obj_keys in self.overplot_options['arrow']: + if self.overplot_options['arrow'][contour_obj_keys]['Plot']: + + time_index = [slice(0,dim) for dim in self.overplot_options['arrow'][contour_obj_keys]['data']['Data X'].shape] + time_index[self.overplot_options['arrow'][contour_obj_keys]['data']['Time dimension']] = it + time_index = tuple(time_index) + + x_coords=self.overplot_options['arrow'][contour_obj_keys]['data']['X coord'][time_index].flatten()*self.axes_unit_conversion[0] + y_coords=self.overplot_options['arrow'][contour_obj_keys]['data']['Y coord'][time_index].flatten()*self.axes_unit_conversion[1] + data_x=self.overplot_options['arrow'][contour_obj_keys]['data']['Data X'][time_index].flatten()*self.axes_unit_conversion[0] + 
data_y=self.overplot_options['arrow'][contour_obj_keys]['data']['Data Y'][time_index].flatten()*self.axes_unit_conversion[1] + + for i_coord in range(len(x_coords)): + im = plt.arrow(x_coords[i_coord], + y_coords[i_coord], + data_x[i_coord], + data_y[i_coord], + width=self.overplot_options['arrow'][contour_obj_keys]['width'], + color=self.overplot_options['arrow'][contour_obj_keys]['color'], + length_includes_head=True, + ) + + for line_obj_keys in self.overplot_options['line']: + xmin, xmax = self.ax_act.get_xbound() + ymin, ymax = self.ax_act.get_ybound() + if self.overplot_options['line'][line_obj_keys]['Plot']: + + if 'Horizontal' in self.overplot_options['line'][line_obj_keys]: + h_coords=self.overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + self.ax_act.add_line(l) + + if 'Vertical' in self.overplot_options['line'][line_obj_keys]: + v_coords=self.overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + self.ax_act.add_line(l) + if self.axes_unit_conversion[0] == 1.: plt.xlabel(self.ax_list[0].title(language=self.language)) else: @@ -616,11 +473,14 @@ def animate_plot(self, it): if self.axes[2] in self.options['Plot units']: time_unit=self.options['Plot units'][self.axes[2]] time_coeff=self.axes_unit_conversion[2] + else: + time_unit=self.coord_t.unit.unit + time_coeff=1. else: time_unit=self.coord_t.unit.unit time_coeff=1. 
- title = str(self.d.exp_id)+' @ '+self.coord_t.unit.name+'='+"{:10.5f}".format(self.tdata[it]*time_coeff)+\ + title = str(self.d.exp_id)+' @ '+self.coord_t.unit.name+'='+"{:10.7f}".format(self.tdata[it]*time_coeff)+\ ' ['+time_unit+']' self.ax_act.set_title(title) @@ -629,7 +489,7 @@ def _reset_animation(self, event): self.anim.event_source.stop() self.speed = 40. self.anim = animation.FuncAnimation(plt.figure(self.plot_id.figure), self.animate_plot, - len(self.tdata),interval=self.speed,blit=False) + len(self.tdata),interval=self.speed,blit=False) self.anim.event_source.start() self.pause = False @@ -708,12 +568,6 @@ def clear(self): self.plot_data = [] self.plt_axis_list = None self.options = [] - - -# def __del__(self): -# if (self.plt_axis_list is not None): -# for ax in self.plt_axis_list: -# ax.remove() def check_axes(self,d, axes, clear=True, default_axes=None, force=False): """ Checks whether the required plot axes are correct, present and compatible with the self PlotID. @@ -779,9 +633,6 @@ def check_axes(self,d, axes, clear=True, default_axes=None, force=False): if (not clear and (self.axes is not None)): for ax_in,ax_plot in zip(ax_list, self.axes): # If none of the plot and the new axis has name and ot unit, the new axis will not have it either -# if ((ax_plot.name == '') or (ax_plot.unit == '') or -# (ax_in.name == '') or (ax_in.unit == '')): -# ax_out_list.append(Unit()) if ((ax_in.name != ax_plot.name) or (ax_in.unit != ax_plot.unit)): if (force): u = flap.coordinate.Unit() @@ -846,8 +697,7 @@ def __get_gca_invalid(): gca_invalid except NameError: gca_invalid = False - return gca_invalid - + return gca_invalid def sample_for_plot(x,y,x_error,y_error,n_points): """ @@ -974,29 +824,100 @@ def _plot(data_object, 'Waittime' : Time to wait [Seconds] between two images in anim-... type plots 'Video file': Name of output video file for anim-... plots 'Video framerate': Frame rate for output video. - 'Video format': Format of the video. 
Valid: 'avi' + 'Video format': Format of the video. Valid: 'avi' (windows,macOS,linux), + 'mkv', 'mp4' (macOS,linux) + Note for macOS: mp4 is the only embeddable in Powerpoint. 'Clear': Boolean. If True don't use the existing plots, generate new. (No overplotting.) 'Force axes': Force overplotting even if axes are incpomatible 'Colorbar': Boolelan. Switch on/off colorbar 'Nan color': The color to use in image data plotting for np.nan (not-a-number) values 'Interpolation': Interpolation method for image plot. 'Language': Language of certain standard elements in the plot. ('EN', 'HU') - 'EFIT options': Dictionary of EFIT plotting options: - 'Plot separatrix': Set to plot the separatrix onto the video - 'Separatrix X': Name of the flap.DataObject for the separatrix X data (usually R) - 'Separatrix Y': Name of the flap.DataObject for the separatrix Y data (usually z) - 'Separatrix XY': Name of the 2D flap.DataObject for the separatrix XY data (usually Rz) - 'Separatrix color': Color of the separatrix for the plot - 'Plot limiter': Set to plot the limiter onto the video - 'Limiter X': Name of the flap.DataObject for the limiter X data (usually R) - 'Limiter Y': Name of the flap.DataObject for the limiter Y data (usually z) - 'Limiter XY': Name of the 2D flap.DataObject for the limiter XY data (usually Rz) - 'Limiter color': Color of the limiter for the plot - 'Plot flux surfaces': Name of the 2D flap.DataObject for the flux surfaces - (should have the same coordinate names as the plot) - 'nlevels': Number of contour lines for the flux surface plotting + 'Overplot options': Dictionary of overplotting options: + Path, contour, arrow or line overplotting for the different plot-types: + Dictionary with keys of 'contour', 'path' or 'line': + + Example options['Overplot options']['contour']: + + options['Overplot options']['contour']=\ + {'CNAME':{'Data object':None, #2D spatial FLAP object name + 'Plot':False, #Boolean for plotting + 'Colormap':None, #Colormap for the 
contour + 'nlevel':51, #Level for the contour + 'Slicing':None, #Slicing for the FLAP object + }} + Can contain multiple CNAME (contour name) keywords, each is + plotted. Works in 2D plots. Should be in the same time unit as + the data is in the animations. + + Example options['Overplot options']['path']: + + options['Overplot options']['path']=\ + {'PNAME':{'Data object X':None, #1D spatial FLAP object name + 'Data object Y':None, #1D spatial FLAP object name + 'Plot':False, #BOOlean for plotting + 'Color':'black', #Color of the path + 'Slicing':None, #Slicing for the FLAP object + }} + Can contain multiple PNAME (path name) keyword, each is + plotted. Works in 2D plots. Should be in the same time unit as + the data is in the animations. + + Example options['Overplot options']['line']: + + options['Overplot options']['path']=\ + {'LNAME':{'Vertical':[X_coord,'red'], #[X coordinate in the unit of the plot, plot color] + 'Horizontal':[Y_coord,'blue'], #[Y coordinate in the unit of the plot, plot color] + 'Plot':False, + }} + Can contain multiple LNAME (line name) keywords. If 'Vertical' + keyword is present, a line is vertical plotted at X_coord. + If 'Horizontal' keyword is present, a line is plottad at Y_coord. + Works in all plot types. + + Example options['Overplot options']['arrow']: + + options['Overplot options']['arrow']=\ + {'ANAME':{'Data object X':'X_OBJ', + 'Data object Y':'Y_OBJ', + 'Plot':True, + 'Slicing':{'Time':flap.Intervals(t1,t2)} + 'width':0.0001, + 'color':'red', + }} + Can contain multiple ANAME keywords. X_OBJ and Y_OBJ should contain + the same coordinates for the start of the arrow. The length of the + arrow is determined by the data in the two objects and the unit + conversion factors. The head of the arrow is 3 times the width. + Generally this feature is useful for velocimetry plotting. 
+ + + options['Overplot options']['path']=\ + {'ANAME':{'Vertical':[X_coord,'red'], #[X coordinate in the unit of the plot, plot color] + 'Horizontal':[Y_coord,'blue'], #[Y coordinate in the unit of the plot, plot color] + 'Plot':False, + }} + Can contain multiple LNAME (line name) keywords. If 'Vertical' + keyword is present, a line is vertical plotted at X_coord. + If 'Horizontal' keyword is present, a line is plottad at Y_coord. + Works in all plot types. + 'Prevent saturation': Prevents saturation of the video signal when it exceeds zrange[1] It uses data modulo zrange[1] to overcome the saturation. (works for animation) + (default: False) + Caveat: doubles the memory usage of the plotted dataset during the plotting time. + 'Plot units': The plotting units for each axis. It can be a dictionary or a list. Its + use is different for the two cases as such: + Dictionary input: keywords: axis name (e.g. 'Device R'), value: unit (e.g. 'mm') + The number of keys is not limited. The ones from the axes will be used. + e.g.: options['Plot units']={'Device R':'mm', 'Device z':'mm', 'Time':'ms'} + List input: the number of values should correspond to the axes input as such: + e.g.: axes=['Device R','Device z','Time'] --> options['Plot units']=['mm','mm','ms'] + 'Equal axes': IF the units of the x and y axes coordinates match, it makes the plot's + axes to have equal spacing. Doesn't care about the plot units, just data units. + (default: False) + 'Axes visibility': Hides the title and labels of the axes if set to [False,False]. First + value is for the X axis and the second is for the Y axis. 
(default: [True,True]) """ @@ -1006,8 +927,10 @@ def _plot(data_object, 'Clear':False,'Force axes':False,'Language':'EN','Maxpoints': 4000, 'Levels': 10, 'Colormap':None, 'Waittime':1,'Colorbar':True,'Nan color':None, 'Interpolation':'bilinear','Video file':None, 'Video framerate': 20,'Video format':'avi', - 'EFIT options':None, 'Prevent saturation':False, 'Plot units':None, + 'Overplot options':None, 'Prevent saturation':False, 'Plot units':None, + 'Equal axes':False, 'Axes visibility':[True,True], } + _options = flap.config.merge_options(default_options, options, data_source=data_object.data_source, section='Plot') if (plot_options is None): @@ -1022,11 +945,21 @@ def _plot(data_object, raise TypeError("Invalid type for option Clear. Should be boolean.") if (type(_options['Force axes']) is not bool): raise TypeError("Invalid type for option 'Force axes'. Should be boolean.") - - if ((slicing is not None) or (summing is not None)): - d = data_object.slice_data(slicing=slicing, summing=summing, options=slicing_options) + + if (_options['Z range'] is not None) and (_options['Prevent saturation']): + if (_options['Z range'] is not None): + if ((type(_options['Z range']) is not list) or (len(_options['Z range']) != 2)): + raise ValueError("Invalid Z range setting.") + if ((slicing is not None) or (summing is not None)): + d = copy.deepcopy(data_object.slice_data(slicing=slicing, summing=summing, options=slicing_options)) + else: + d = copy.deepcopy(data_object) + d.data=np.mod(d.data,_options['Z range'][1]) else: - d = data_object + if ((slicing is not None) or (summing is not None)): + d = data_object.slice_data(slicing=slicing, summing=summing, options=slicing_options) + else: + d = data_object # Determining a PlotID: # argument, actual or a new one @@ -1052,7 +985,6 @@ def _plot(data_object, plt.cla() if (_options['Clear'] ): _plot_id = PlotID() -# _plot_id.clear() if (_plot_id.figure is not None): plt.figure(_plot_id.figure) else: @@ -1121,17 +1053,369 @@ def 
_plot(data_object, language = _options['Language'] if (_options['Video file'] is not None): - if (_options['Video format'] == 'avi'): - video_codec_code = 'XVID' - else: - raise ValueError("Cannot write video in format '"+_options['Video format']+"'.") + if ((os.sys.platform == 'darwin' or 'linux' in os.sys.platform) and + (options['Video format'] not in ['avi','mkv', 'mp4'])): + raise ValueError("The chosen video format is not cupported on macOS.") + if os.sys.platform == 'win32' and _options['Video format'] != 'avi': + raise ValueError("The chosen video format is not cupported on Windows.") + video_codec_decrypt={'avi':'XVID', + 'mkv':'X264', + 'mp4':'mp4v'} + video_codec_code=video_codec_decrypt[_options['Video format']] + print('Forcing waittime to be 0s for video saving.') + _options['Waittime']=0. + + #These lines do the coordinate unit conversion + axes_unit_conversion=[1.,1.,1.] + + if _options['Plot units'] is not None: + unit_length=len(_options['Plot units']) + if unit_length > 3: + raise ValueError('Only three units are allowed for the three coordinates.') + #For some reason the input list or dictionary can morph into a list or a dict class + possible_types=[list,dict,'',''] + if not (type(_options['Plot units']) in possible_types): + raise TypeError('The \'Plot units\' option needs to be either a dictionary or a list.') + #Converting the input list into dictionary: + if (type(_options['Plot units']) is list or + type(_options['Plot units']) == ''): + temp_units= _options['Plot units'][:] + _options['Plot units']={} + for index_axes in range(len(axes)): + _options['Plot units'][axes[index_axes]]=temp_units[index_axes] + #Finding the corresponding data coordinate to the input unit conversion and converting + unit_conversion_coeff={} + for plot_unit_name in _options['Plot units']: + for index_data_unit in range(len(d.coordinates)): + if (plot_unit_name == d.coordinates[index_data_unit].unit.name): + 
data_coordinate_unit=d.coordinates[index_data_unit].unit.unit + plot_coordinate_unit=_options['Plot units'][plot_unit_name] + unit_conversion_coeff[plot_unit_name]=flap.tools.unit_conversion(original_unit=data_coordinate_unit, + new_unit=plot_coordinate_unit) + #Saving the coefficients in the same order as the axes are + for index_axes in range(len(axes)): + if axes[index_axes] in _options['Plot units']: + axes_unit_conversion[index_axes]=unit_conversion_coeff[axes[index_axes]] + + #overplot_available_plot_types=['image', 'anim-image','contour','anim-contour','animation'] + overplot_options=None + if _options['Overplot options'] is not None: - + default_overplot_options={'contour':{'NAME':{'Data object':None, + 'Plot':False, + 'Colormap':None, + 'nlevel':51, + 'Slicing':None, + 'Arrow':False, + }}, + + 'path':{'NAME':{'Data object X':None, + 'Data object Y':None, + 'Plot':False, + 'Color':'black', + 'Slicing':None, + }}, + + 'line':{'NAME':{'Vertical':[0,'red'], + 'Horizontal':[1,'blue'], + 'Plot':False, + }}, + + 'arrow':{'NAME':{'Data object X':None, + 'Data object Y':None, + 'Plot':False, + 'Slicing':None, + 'width':0.5, + 'color':'red', + }}, + } + + overplot_options=flap.config.merge_options(default_overplot_options,_options['Overplot options'],data_source=data_object.data_source) + + #TIME (AXES(2)) INDEPENDENT OVERPLOTTING OBJECT CREATION + if plot_type in ['image', 'contour']: + + for path_obj_keys in overplot_options['path']: + try: + x_object_name=overplot_options['path'][path_obj_keys]['Data object X'] + x_object=flap.get_data_object_ref(x_object_name,exp_id=d.exp_id) + except: + raise ValueError("The object "+x_object_name+" cannot be read.") + try: + y_object_name=overplot_options['path'][path_obj_keys]['Data object Y'] + y_object=flap.get_data_object_ref(y_object_name,exp_id=d.exp_id) + except: + raise ValueError("The object "+y_object_name+" cannot be read.") + #Error handling + + if 'Slicing' in overplot_options['path'][path_obj_keys]: + try: + 
x_object=x_object.slice_data(slicing=overplot_options['path'][path_obj_keys]['Slicing']) + y_object=y_object.slice_data(slicing=overplot_options['path'][path_obj_keys]['Slicing']) + except: + raise ValueError('Slicing did not succeed. Try it outside the plotting!') + + if (len(x_object.data.shape) != 1 or len(y_object.data.shape) != 1): + raise ValueError("The "+overplot_options['path'][path_obj_keys]['Data object X']+' or ' + "the "+overplot_options['path'][path_obj_keys]['Data object Y']+" data is not 1D. Use slicing.") + + unit_conversion_coeff=[1]*2 + original_units=[x_object.data_unit.unit,y_object.data_unit.unit] + for index_coordinate in range(len(d.coordinates)): + for index_axes in range(2): + if (d.coordinates[index_coordinate].unit.name == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=original_units[index_axes], + new_unit=d.coordinates[index_coordinate].unit.unit) + + overplot_options['path'][path_obj_keys]['data']={} + overplot_options['path'][path_obj_keys]['data']['Data']=np.asarray([x_object.data*unit_conversion_coeff[0], + y_object.data*unit_conversion_coeff[1]]) + + for contour_obj_keys in overplot_options['contour']: + if (overplot_options['contour'][contour_obj_keys]['Plot']): + try: + xy_object_name=overplot_options['contour'][contour_obj_keys]['Data object'] + xy_object=flap.get_data_object(xy_object_name,exp_id=d.exp_id) + except: + raise ValueError(xy_object_name+'data is not available. The data object needs to be read first.') + if 'Slicing' in overplot_options['contour'][path_obj_keys]: + try: + xy_object.slice_data(slicing=overplot_options['contour'][path_obj_keys]['Slicing']) + except: + raise ValueError('Slicing did not succeed. Try it outside the plotting!') + + if len(xy_object.data.shape) != 2: + raise ValueError('Contour object\'s, ('+xy_object_name+') data needs to be a 2D matrix.') + + unit_conversion_coeff=[1.] 
* 2 + for index_data_coordinate in range(len(d.coordinates)): + for index_oplot_coordinate in range(len(xy_object.coordinates)): + for index_axes in range(2): + if ((d.coordinates[index_data_coordinate].unit.name == axes[index_axes]) and + (xy_object.coordinates[index_oplot_coordinate].unit.name) == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=xy_object.coordinates[index_oplot_coordinate].unit.unit, + new_unit=d.coordinates[index_data_coordinate].unit.unit) + + overplot_options['contour'][contour_obj_keys]['data']={} + overplot_options['contour'][contour_obj_keys]['data']['Data']=xy_object.data + overplot_options['contour'][contour_obj_keys]['data']['X coord']=xy_object.coordinate(axes[0])[0]*unit_conversion_coeff[0] + overplot_options['contour'][contour_obj_keys]['data']['Y coord']=xy_object.coordinate(axes[1])[0]*unit_conversion_coeff[1] + + for contour_obj_keys in overplot_options['arrow']: + if (overplot_options['arrow'][contour_obj_keys]['Plot']): + try: + x_object_name=overplot_options['arrow'][contour_obj_keys]['Data object X'] + x_object=flap.get_data_object(x_object_name,exp_id=d.exp_id) + + y_object_name=overplot_options['arrow'][contour_obj_keys]['Data object Y'] + y_object=flap.get_data_object(y_object_name,exp_id=d.exp_id) + except: + raise ValueError(x_object_name+' or '+y_object_name+' data is not available. The data object needs to be read first.') + + if 'Slicing' in overplot_options['arrow'][path_obj_keys]: + try: + x_object.slice_data(slicing=overplot_options['arrow'][path_obj_keys]['Slicing']) + y_object.slice_data(slicing=overplot_options['arrow'][path_obj_keys]['Slicing']) + except: + raise ValueError('Slicing did not succeed. 
Try it outside the plotting!') + + if len(x_object.data.shape) != 2 or len(y_object.data.shape): + raise ValueError('Arrow object\'s, ('+x_object_name+','+y_object_name+') data need to be a 2D matrix.') + if (x_object.coordinate(axes[0])[0] != y_object.coordinate(axes[0])[0] or + x_object.coordinate(axes[1])[0] != y_object.coordinate(axes[1])[0]): + raise ValueError('The x or y coordinates of the x and y arrow objects do not match.') + + unit_conversion_coeff=[1.] * 2 + for index_data_coordinate in range(len(d.coordinates)): + for index_oplot_coordinate in range(len(x_object.coordinates)): + for index_axes in range(2): + if ((d.coordinates[index_data_coordinate].unit.name == axes[index_axes]) and + (x_object.coordinates[index_oplot_coordinate].unit.name) == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=x_object.coordinates[index_oplot_coordinate].unit.unit, + new_unit=d.coordinates[index_data_coordinate].unit.unit) + + overplot_options['arrow'][contour_obj_keys]['data']={} + overplot_options['arrow'][contour_obj_keys]['data']['Data X']=x_object.data*unit_conversion_coeff[0] + overplot_options['arrow'][contour_obj_keys]['data']['Data Y']=y_object.data*unit_conversion_coeff[1] + overplot_options['arrow'][contour_obj_keys]['data']['X coord']=x_object.coordinate(axes[0])[0]*unit_conversion_coeff[0] + overplot_options['arrow'][contour_obj_keys]['data']['Y coord']=y_object.coordinate(axes[1])[0]*unit_conversion_coeff[1] + + #TIME (AXES(2)) DEPENDENT OVERPLOTTING OBJECT CREATION + if plot_type in ['anim-image','anim-contour','animation'] : + for index_time in range(len(d.coordinates)): + if (d.coordinates[index_time].unit.name == axes[2]): + if len(d.coordinates[index_time].dimension_list) != 1: + raise ValueError('The time coordinate is changing along multiple dimensions.') + time_dimension_data=d.coordinates[index_time].dimension_list[0] + data_time_index_coordinate=index_time + + 
time_index=[0]*len(d.coordinate(axes[2])[0].shape) + time_index[time_dimension_data]=Ellipsis + tdata=d.coordinate(axes[2])[0][tuple(time_index)] + + for path_obj_keys in overplot_options['path']: + if (overplot_options['path'][path_obj_keys]['Plot']): + try: + x_object_name=overplot_options['path'][path_obj_keys]['Data object X'] + x_object=flap.get_data_object_ref(x_object_name,exp_id=d.exp_id) + except: + raise ValueError("The object "+x_object_name+" cannot be read.") + try: + y_object_name=overplot_options['path'][path_obj_keys]['Data object Y'] + y_object=flap.get_data_object_ref(y_object_name,exp_id=d.exp_id) + except: + raise ValueError("The object "+y_object_name+" cannot be read.") + #Error handling + if (len(x_object.data.shape) != 2 or len(y_object.data.shape) != 2): + raise ValueError("The "+overplot_options['path'][path_obj_keys]['Data object X']+' or ' + "the "+overplot_options['path'][path_obj_keys]['Data object Y']+" data is not 1D.") + + for index_coordinate in range(len(x_object.coordinates)): + if (x_object.coordinates[index_coordinate].unit.name == axes[2]): + oplot_x_time_index_coordinate=index_coordinate + for index_coordinate in range(len(x_object.coordinates)): + if (x_object.coordinates[index_coordinate].unit.name == axes[2]): + oplot_y_time_index_coordinate=index_coordinate + + if (d.coordinates[data_time_index_coordinate].unit.unit != x_object.coordinates[oplot_x_time_index_coordinate].unit.unit or + d.coordinates[data_time_index_coordinate].unit.unit != x_object.coordinates[oplot_y_time_index_coordinate].unit.unit): + raise TypeError('The '+axes[2]+' unit of the overplotted contour object differs from the data\'s. 
Interpolation cannot be done.') + + #Interpolate the path data to the time vector of the original data + try: + x_object_interp=flap.slice_data(x_object_name, + exp_id=d.exp_id, + slicing={axes[2]:tdata}, + options={'Interpolation':'Linear'}, + output_name='X OBJ INTERP') + except: + raise ValueError('Interpolation cannot be done for the \'Data object X\' along axis '+axes[2]+'.') + try: + y_object_interp=flap.slice_data(y_object_name, + exp_id=d.exp_id, + slicing={axes[2]:tdata}, + options={'Interpolation':'Linear'}, + output_name='Y OBJ INTERP') + except: + raise ValueError('Interpolation cannot be done for the \'Data object Y\' along axis '+axes[2]+'.') + + #Finding the time coordinate + for index_time in range(len(x_object.coordinates)): + if (x_object.coordinates[index_time].unit.name == axes[2]): + time_dimension_oplot=x_object.coordinates[index_time].dimension_list[0] + + #Convert the units of the path if its coordinates are not the same as the data object's. + unit_conversion_coeff=[1]*2 + original_units=[x_object.data_unit.unit,y_object.data_unit.unit] + for index_coordinate in range(len(d.coordinates)): + for index_axes in range(2): + if (d.coordinates[index_coordinate].unit.name == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=original_units[index_axes], + new_unit=d.coordinates[index_coordinate].unit.unit) + overplot_options['path'][path_obj_keys]['data']={} + overplot_options['path'][path_obj_keys]['data']['Data resampled']=np.asarray([x_object_interp.data*unit_conversion_coeff[0], + y_object_interp.data*unit_conversion_coeff[1]]) + overplot_options['path'][path_obj_keys]['data']['Time dimension']=time_dimension_oplot + + for contour_obj_keys in overplot_options['contour']: + if (overplot_options['contour'][contour_obj_keys]['Plot']): + try: + xy_object_name=overplot_options['contour'][contour_obj_keys]['Data object'] + xy_object=flap.get_data_object(xy_object_name,exp_id=d.exp_id) + except: + raise 
ValueError(xy_object_name+'data is not available. The data object needs to be read first.') + if len(xy_object.data.shape) != 3: + raise ValueError('Contour object\'s, ('+xy_object_name+') data needs to be a 3D matrix (x,y,t), not necessarily in this order.') + + for index_coordinate in range(len(xy_object.coordinates)): + if (xy_object.coordinates[index_coordinate].unit.name == axes[2]): + time_dimension_index=xy_object.coordinates[index_coordinate].dimension_list[0] + oplot_time_index_coordinate=index_coordinate + + if d.coordinates[data_time_index_coordinate].unit.unit != xy_object.coordinates[oplot_time_index_coordinate].unit.unit: + raise TypeError('The '+axes[2]+' unit of the overplotted contour object differs from the data\'s. Interpolation cannot be done.') + + unit_conversion_coeff=[1.] * 3 + for index_data_coordinate in range(len(d.coordinates)): + for index_oplot_coordinate in range(len(xy_object.coordinates)): + for index_axes in range(3): + if ((d.coordinates[index_data_coordinate].unit.name == axes[index_axes]) and + (xy_object.coordinates[index_oplot_coordinate].unit.name) == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=xy_object.coordinates[index_oplot_coordinate].unit.unit, + new_unit=d.coordinates[index_data_coordinate].unit.unit) + + xy_object_interp=flap.slice_data(xy_object_name, + exp_id=d.exp_id, + slicing={axes[2]:tdata}, + options={'Interpolation':'Linear'}, + output_name='XY OBJ INTERP') + + overplot_options['contour'][contour_obj_keys]['data']={} + overplot_options['contour'][contour_obj_keys]['data']['Data resampled']=xy_object_interp.data + overplot_options['contour'][contour_obj_keys]['data']['X coord resampled']=xy_object_interp.coordinate(axes[0])[0]*unit_conversion_coeff[0] + overplot_options['contour'][contour_obj_keys]['data']['Y coord resampled']=xy_object_interp.coordinate(axes[1])[0]*unit_conversion_coeff[1] + 
overplot_options['contour'][contour_obj_keys]['data'][axes[2]]=tdata + overplot_options['contour'][contour_obj_keys]['data']['Time dimension']=time_dimension_index + + for contour_obj_keys in overplot_options['arrow']: + if (overplot_options['arrow'][contour_obj_keys]['Plot']): + #try: + if True: + x_object_name=overplot_options['arrow'][contour_obj_keys]['Data object X'] + x_object=flap.get_data_object(x_object_name,exp_id=d.exp_id) + y_object_name=overplot_options['arrow'][contour_obj_keys]['Data object Y'] + y_object=flap.get_data_object(y_object_name,exp_id=d.exp_id) + #except: + # raise ValueError(x_object_name+' or '+y_object_name+' data is not available. The data object needs to be read first.') + if len(x_object.data.shape) != 3 or len(y_object.data.shape) != 3: + raise ValueError('Arrow object\'s, ('+x_object_name+','+y_object_name+') data needs to be a 3D matrix (x,y,t), not necessarily in this order.') + + for index_coordinate in range(len(x_object.coordinates)): + if (x_object.coordinates[index_coordinate].unit.name == axes[2]): + time_dimension_index=x_object.coordinates[index_coordinate].dimension_list[0] + oplot_time_index_coordinate=index_coordinate + + if (d.coordinates[data_time_index_coordinate].unit.unit != x_object.coordinates[oplot_time_index_coordinate].unit.unit or + d.coordinates[data_time_index_coordinate].unit.unit != y_object.coordinates[oplot_time_index_coordinate].unit.unit): + raise TypeError('The '+axes[2]+' unit of the overplotted contour object differs from the data\'s. Interpolation cannot be done.') + + if (np.sum(x_object.coordinate(axes[0])[0] - y_object.coordinate(axes[0])[0]) != 0 or + np.sum(x_object.coordinate(axes[1])[0] - y_object.coordinate(axes[1])[0]) != 0): + raise ValueError('The x or y coordinates of the x and y arrow objects do not match.') + + unit_conversion_coeff=[1.] 
* 3 + for index_data_coordinate in range(len(d.coordinates)): + for index_oplot_coordinate in range(len(x_object.coordinates)): + for index_axes in range(3): + if ((d.coordinates[index_data_coordinate].unit.name == axes[index_axes]) and + (x_object.coordinates[index_oplot_coordinate].unit.name) == axes[index_axes]): + unit_conversion_coeff[index_axes] = flap.tools.unit_conversion(original_unit=x_object.coordinates[index_oplot_coordinate].unit.unit, + new_unit=d.coordinates[index_data_coordinate].unit.unit) + x_object_interp=flap.slice_data(x_object_name, + exp_id=d.exp_id, + slicing={axes[2]:tdata}, + options={'Interpolation':'Linear'}) + y_object_interp=flap.slice_data(y_object_name, + exp_id=d.exp_id, + slicing={axes[2]:tdata}, + options={'Interpolation':'Linear'}) + + overplot_options['arrow'][contour_obj_keys]['data']={} + overplot_options['arrow'][contour_obj_keys]['data']['Data X']=x_object_interp.data*unit_conversion_coeff[0] + overplot_options['arrow'][contour_obj_keys]['data']['Data Y']=y_object_interp.data*unit_conversion_coeff[1] + + overplot_options['arrow'][contour_obj_keys]['data']['X coord']=x_object_interp.coordinate(axes[0])[0]*unit_conversion_coeff[0] + overplot_options['arrow'][contour_obj_keys]['data']['Y coord']=y_object_interp.coordinate(axes[1])[0]*unit_conversion_coeff[1] + + overplot_options['arrow'][contour_obj_keys]['data'][axes[2]]=tdata + overplot_options['arrow'][contour_obj_keys]['data']['Time dimension']=time_dimension_index + # X range and Z range is processed here, but Y range not as it might have multiple entries for some plots xrange = _options['X range'] if (xrange is not None): if ((type(xrange) is not list) or (len(xrange) != 2)): raise ValueError("Invalid X range setting.") + zrange = _options['Z range'] if (zrange is not None): if ((type(zrange) is not list) or (len(zrange) != 2)): @@ -1144,6 +1428,10 @@ def _plot(data_object, contour_levels = _options['Levels'] # Here _plot_id is a valid (maybe empty) PlotID + """ 
--------------------------------------------- + | XY AND SCATTER PLOT DEFINITION | + ---------------------------------------------""" + if ((_plot_type == 'xy') or (_plot_type == 'scatter')): # 1D plots: xy, scatter and complex versions # Checking whether oveplotting into the same plot type @@ -1155,7 +1443,7 @@ def _plot(data_object, if ((_plot_id.plot_type is not None) and ((_plot_id.plot_type != 'xy') and (_plot_id.plot_type != 'scatter')) or (_plot_id.plot_subtype is not None) and (_plot_id.plot_subtype != subtype)): raise ValueError("Overplotting into different plot type. Use option={'Clear':True} to erase first.") - # Processing axes + #Processing axes default_axes = [d.coordinates[0].unit.name, '__Data__'] try: pdd_list, ax_list = _plot_id.check_axes(d, @@ -1167,8 +1455,8 @@ def _plot(data_object, raise e # Preparing data plotdata = [0]*2 - plotdata_low = [0]*2 #UNUSED - plotdata_high = [0]*2 #UNUSED +# plotdata_low = [0]*2 #UNUSED +# plotdata_high = [0]*2 #UNUSED ploterror = [0]*2 for ax_ind in range(2): if (pdd_list[ax_ind].data_type == PddType.Data): @@ -1252,29 +1540,75 @@ def _plot(data_object, _plot_opt = _plot_options[0] if (type(_plot_opt) is not dict): raise ValueError("Plot options should be a dictionary or list of dictionaries.") - + + if xerr is not None: + xerr_plot=xerr*axes_unit_conversion[0] + else: + xerr_plot=xerr + if (_plot_type == 'xy'): if (plot_error): - ax.errorbar(x,y,xerr=xerr,yerr=yerr,errorevery=errorevery,**_plot_opt) + ax.errorbar(x*axes_unit_conversion[0], + y, + xerr=xerr_plot, + yerr=yerr,errorevery=errorevery,**_plot_opt) else: - ax.plot(x,y,**_plot_opt) + ax.plot(x*axes_unit_conversion[0], + y,**_plot_opt) else: if (plot_error): - ax.errorbar(x,y,xerr=xerr,yerr=yerr,errorevery=errorevery,fmt='o',**_plot_opt) + ax.errorbar(x*axes_unit_conversion[0], + y, + xerr=xerr_plot, + yerr=yerr, + errorevery=errorevery,fmt='o',**_plot_opt) else: - ax.scatter(x,y,**_plot_opt) + ax.scatter(x*axes_unit_conversion[0], + y,**_plot_opt) - 
ax.set_xlabel(ax_list[0].title(language=language)) - ax.set_ylabel(ax_list[1].title(language=language)) if (_options['Log x']): ax.set_xscale('log') if (_options['Log y']): ax.set_yscale('log') + if (xrange is not None): - ax.set_xlim(xrange[0],xrange[1]) + ax.set_xlim(xrange[0]*axes_unit_conversion[0], + xrange[1]*axes_unit_conversion[0]) + if (yrange is not None): - ax.set_ylim(yrange[0],yrange[1]) + ax.set_ylim(yrange[0], + yrange[1]) + + if (overplot_options is not None): + if overplot_options['line'] is not None: + for line_obj_keys in overplot_options['line']: + xmin, xmax = ax.get_xbound() + ymin, ymax = ax.get_ybound() + if overplot_options['line'][line_obj_keys]['Plot']: + if 'Horizontal' in overplot_options['line'][line_obj_keys]: + h_coords=overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + ax.add_line(l) + if 'Vertical' in overplot_options['line'][line_obj_keys]: + v_coords=overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + ax.add_line(l) + # Setting axis labels + if axes_unit_conversion[0] == 1.: + ax.set_xlabel(ax_list[0].title(language=language)) + else: + ax.set_xlabel(ax_list[0].title(language=language, + new_unit=_options['Plot units'][axes[0]])) + ax.set_ylabel(ax_list[1].title(language=language)) + + ax.get_xaxis().set_visible(_options['Axes visibility'][0]) + ax.get_yaxis().set_visible(_options['Axes visibility'][1]) + title = ax.get_title() if (title is None): title = '' @@ -1357,24 +1691,30 @@ def _plot(data_object, yerror = yerror_abs # Checking for constants, converting to numpy array + if (np.isscalar(xdata)): + if (np.isscalar(ydata)): + xdata = np.full(1, xdata) + else: + xdata = np.full(ydata.size, xdata) + if (plot_error): 
+ c=pdd_list[0].value #THESE THREE VARIABLES WERE UNDIFINED + xdata_low=np.min(xdata) + xdata_high=np.max(xdata) + xerror = d._plot_coord_ranges(c, xdata, xdata_low, xdata_high) + else: + xerror = None if (np.isscalar(ydata)): if (np.isscalar(xdata)): ydata = np.full(1, ydata) else: ydata = np.full(xdata.size, ydata) if (plot_error): - yerror = d._plot_coord_ranges(c, ydata, ydata_low, ydata_high) #THESE ARE UNDEFINED + c=pdd_list[1].value #THESE THREE VARIABLES WERE UNDIFINED + ydata_low=np.min(ydata) + ydata_high=np.max(ydata) + yerror = d._plot_coord_ranges(c, ydata, ydata_low, ydata_high) else: yerror = None - if (np.isscalar(xdata)): - if (np.isscalar(ydata)): - xdata = np.full(1, xdata) - else: - xdata = np.full(ydata.size, xdata) - if (plot_error): - xerror = d._plot_coord_ranges(c, xdata, xdata_low, xdata_high) #THESE ARE UNDEFINED - else: - xerror = None if (all_points is True): x = xdata @@ -1395,30 +1735,78 @@ def _plot(data_object, _plot_opt[i_comp] if (type(_plot_opt) is not dict): raise ValueError("Plot options should be a dictionary or list of dictionaries.") - + + if xerr is not None: + xerr_plot=xerr*axes_unit_conversion[0] + else: + xerr_plot=xerr + if (_plot_type == 'xy'): if (plot_error): - ax.errorbar(x,y,xerr=xerr,yerr=yerr,errorevery=errorevery,**_plot_opt) + ax.errorbar(x*axes_unit_conversion[0], + y, + xerr=xerr_plot, + yerr=yerr, + errorevery=errorevery,**_plot_opt) else: - ax.plot(x,y,**_plot_opt) + ax.plot(x*axes_unit_conversion[0], + y,**_plot_opt) else: if (plot_error): - ax.errorbar(x,y,xerr=xerr,yerr=yerr,errorevery=errorevery,fmt='o',**_plot_opt) + ax.errorbar(x*axes_unit_conversion[0], + y, + xerr=xerr_plot, + yerr=yerr, + errorevery=errorevery,fmt='o',**_plot_opt) else: - ax.scatter(x,y,**_plot_opt) - - # Setting axis labels - ax.set_xlabel(ax_list[0].title(language=language)) - ax.set_ylabel(ax_list[1].title(language=language,complex_txt=[comptype,i_comp])) + ax.scatter(x*axes_unit_conversion[0], + y,**_plot_opt) if 
(_options['Log x']): ax.set_xscale('log') if (_options['Log y']): ax.set_yscale('log') + if (xrange is not None): - ax.set_xlim(xrange[0],xrange[1]) + ax.set_xlim(xrange[0]*axes_unit_conversion[0], + xrange[1]*axes_unit_conversion[0]) + if (yrange is not None): - ax.set_ylim(yrange[i_comp][0],yrange[i_comp][1]) + ax.set_ylim(yrange[i_comp][0],yrange[i_comp][1]) + if (overplot_options is not None): + if overplot_options['line'] is not None: + for line_obj_keys in overplot_options['line']: + xmin, xmax = ax.get_xbound() + ymin, ymax = ax.get_ybound() + + if overplot_options['line'][line_obj_keys]['Plot']: + if 'Horizontal' in overplot_options['line'][line_obj_keys]: + h_coords=overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + ax.add_line(l) + + if 'Vertical' in overplot_options['line'][line_obj_keys]: + v_coords=overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + ax.add_line(l) + + # Setting axis labels + if axes_unit_conversion[0] == 1.: + ax.set_xlabel(ax_list[0].title(language=language)) + else: + ax.set_xlabel(ax_list[0].title(language=language, + new_unit=_options['Plot units'][axes[0]])) + + + ax.set_ylabel(ax_list[1].title(language=language), + complex_txt=[comptype,i_comp]) + ax.get_xaxis().set_visible(_options['Axes visibility'][0]) + ax.get_yaxis().set_visible(_options['Axes visibility'][1]) + title = ax.get_title() if (title is None): title = '' @@ -1440,6 +1828,10 @@ def _plot(data_object, _plot_id.plot_subtype = 1 # End of complex xy and scatter plot + + """ --------------------------------------------- + | MULTIXY PLOT DEFINITION | + ---------------------------------------------""" elif (_plot_type == 'multi xy'): if (len(d.shape) > 2): @@ 
-1607,22 +1999,60 @@ def _plot(data_object, errorevery = int(round(len(x)/errorbars)) if (errorevery < 1): errorevery = 1 + if xerr is not None: + xerr_plot=xerr*axes_unit_conversion[0] + else: + xerr_plot=xerr if (plot_error): - ax.errorbar(x,y,xerr=xerr,yerr=yerr,errorevery=errorevery,**_plot_opt) + ax.errorbar(x*axes_unit_conversion[0], + y, + xerr=xerr_plot, + yerr=yerr, + errorevery=errorevery,**_plot_opt) else: - ax.plot(x,y,**_plot_opt) - + ax.plot(x*axes_unit_conversion[0], + y, + **_plot_opt) + if (xrange is not None): - ax.set_xlim(xrange[0],xrange[1]) + ax.set_xlim(xrange[0]*axes_unit_conversion[0], + xrange[1]*axes_unit_conversion[0]) + if (yrange is not None): - ax.set_ylim(yrange[0],yrange[1]) - ax.set_xlabel(ax_list[0].title(language=language)) - ax.set_ylabel(ax_list[1].title(language=language)) + ax.set_ylim(yrange[0],yrange[1]) + if (overplot_options is not None): + if overplot_options['line'] is not None: + for line_obj_keys in overplot_options['line']: + xmin, xmax = ax.get_xbound() + ymin, ymax = ax.get_ybound() + if overplot_options['line'][line_obj_keys]['Plot']: + if 'Horizontal' in overplot_options['line'][line_obj_keys]: + h_coords=overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + ax.add_line(l) + if 'Vertical' in overplot_options['line'][line_obj_keys]: + v_coords=overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + ax.add_line(l) + + if axes_unit_conversion[0] == 1.: + ax.set_xlabel(ax_list[0].title(language=language)) + else: + ax.set_xlabel(ax_list[0].title(language=language, + new_unit=_options['Plot units'][axes[0]])) + + ax.set_ylabel(ax_list[1].title(language=language)) + if (_options['Log x']): ax.set_xscale('log') if 
(_options['Log y']): ax.set_yscale('log') title = ax.get_title() + if (title is None): title = '' if (title[-3:] != '...'): @@ -1642,6 +2072,10 @@ def _plot(data_object, ax.set_title(title) _plot_id.plot_subtype = 0 # real multi xy plot + """ --------------------------------------------- + | IMAGE AND CONTOUR PLOT DEFINITION | + ---------------------------------------------""" + elif ((_plot_type == 'image') or (_plot_type == 'contour')): if (d.data is None): raise ValueError("Cannot plot DataObject without data.") @@ -1651,7 +2085,7 @@ def _plot(data_object, raise TypeError("Image/contour plot is applicable only to real data.") # Checking for numeric type try: - d.data[0,0] += 1 + d.data[0,0]+1 except TypeError: raise TypeError("Image plot is applicable only to numeric data.") @@ -1674,13 +2108,11 @@ def _plot(data_object, # No overplotting is possible for this type of plot, erasing and restarting a Plot_ID if (not _options['Clear']): - plt.subplot(_plot_id.base_subplot) plt.cla() gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=_plot_id.base_subplot) _plot_id.plt_axis_list = [] _plot_id.plt_axis_list.append(plt.subplot(gs[0,0])) ax = _plot_id.plt_axis_list[0] - pdd_list[2].data_type = PddType.Data pdd_list[2].data_object = d if ((pdd_list[0].data_type != PddType.Coordinate) or (pdd_list[1].data_type != PddType.Coordinate)) : @@ -1688,6 +2120,12 @@ def _plot(data_object, coord_x = pdd_list[0].value coord_y = pdd_list[1].value + if _options['Equal axes']: + if (coord_x.unit.unit == coord_y.unit.unit): + #ax.axis('equal') + ax.set_aspect(1.0) + else: + print('Equal axis is not possible. 
The axes units are not equal.') if (_plot_type == 'image'): if ((coord_x.mode.equidistant) and (len(coord_x.dimension_list) == 1) and (coord_y.mode.equidistant) and (len(coord_y.dimension_list) == 1)): @@ -1719,11 +2157,15 @@ def _plot(data_object, else: image_like = False if (image_like): - xdata_range = coord_x.data_range(data_shape=d.shape)[0] - ydata_range = coord_y.data_range(data_shape=d.shape)[0] + xdata_range = coord_x.data_range(data_shape=d.shape)[0] + ydata_range = coord_y.data_range(data_shape=d.shape)[0] + xdata_range = [axes_unit_conversion[0] * xdata_range[0], + axes_unit_conversion[0] * xdata_range[1]] + ydata_range = [axes_unit_conversion[1] * ydata_range[0], + axes_unit_conversion[1] * ydata_range[1]] else: - ydata,ydata_low,ydata_high = coord_y.data(data_shape=d.shape) xdata,xdata_low,xdata_high = coord_x.data(data_shape=d.shape) + ydata,ydata_low,ydata_high = coord_y.data(data_shape=d.shape) if (zrange is None): vmin = np.nanmin(d.data) @@ -1739,10 +2181,11 @@ def _plot(data_object, if (vmin <= 0): raise ValueError("z range[0] cannot be negative or zero for logarithmic scale.") norm = colors.LogNorm(vmin=vmin, vmax=vmax) - locator = ticker.LogLocator(subs='all') + ticker.LogLocator(subs='all') + #locator = ticker.LogLocator(subs='all') #VARIABLE WAS UNUSED --> UNASSIGNED NOW else: norm = None - locator = None + #locator = None if (contour_levels is None): contour_levels = 255 @@ -1771,19 +2214,87 @@ def _plot(data_object, raise e else: if (_plot_type == 'image'): - xgrid, ygrid = flap.tools.grid_to_box(xdata,ydata) + xgrid, ygrid = flap.tools.grid_to_box(xdata*axes_unit_conversion[0], + ydata*axes_unit_conversion[1]) try: - img = ax.pcolormesh(xgrid,ygrid,np.clip(np.transpose(d.data),vmin,vmax),norm=norm,cmap=cmap,vmin=vmin, + img = ax.pcolormesh(xgrid,ygrid, + np.clip(np.transpose(d.data),vmin,vmax), + norm=norm,cmap=cmap,vmin=vmin, vmax=vmax,**_plot_opt) except Exception as e: raise e else: try: - img = 
ax.contourf(xdata,ydata,np.clip(d.data,vmin,vmax),contour_levels,norm=norm, - origin='lower',cmap=cmap,vmin=vmin,vmax=vmax,**_plot_opt) + img = ax.contourf(xdata*axes_unit_conversion[0], + ydata*axes_unit_conversion[1], + np.clip(d.data,vmin,vmax), + contour_levels,norm=norm, + origin='lower',cmap=cmap, + vmin=vmin,vmax=vmax,**_plot_opt) except Exception as e: - raise e - + raise e + + if (overplot_options is not None): + ax.set_autoscale_on(False) + for path_obj_keys in overplot_options['path']: + if overplot_options['path'][path_obj_keys]['Plot']: + ax.plot(overplot_options['path'][path_obj_keys]['data']['Data'][0,:]*axes_unit_conversion[0], + overplot_options['path'][path_obj_keys]['data']['Data'][1,:]*axes_unit_conversion[1], + color=overplot_options['path'][path_obj_keys]['Color']) + + for contour_obj_keys in overplot_options['contour']: + if overplot_options['contour'][contour_obj_keys]['Plot']: + ax.contour(overplot_options['contour'][contour_obj_keys]['data']['X coord'].transpose()*axes_unit_conversion[0], + overplot_options['contour'][contour_obj_keys]['data']['Y coord'].transpose()*axes_unit_conversion[1], + overplot_options['contour'][contour_obj_keys]['data']['Data'], + levels=overplot_options['contour'][contour_obj_keys]['nlevel'], + cmap=overplot_options['contour'][contour_obj_keys]['Colormap']) + + for contour_obj_keys in overplot_options['arrow']: + if overplot_options['arrow'][contour_obj_keys]['Plot']: + + x_coords=overplot_options['arrow'][contour_obj_keys]['data']['X coord'].flatten() + y_coords=overplot_options['arrow'][contour_obj_keys]['data']['Y coord'].flatten() + data_x=overplot_options['arrow'][contour_obj_keys]['data']['Data X'].flatten() + data_y=overplot_options['arrow'][contour_obj_keys]['data']['Data Y'].flatten() + if 'width' in overplot_options['arrow'][contour_obj_keys].keys(): + arrow_width=overplot_options['arrow'][contour_obj_keys]['width'] + else: + arrow_width=0.001 + if 'color' in 
overplot_options['arrow'][contour_obj_keys].keys(): + arrow_color=overplot_options['arrow'][contour_obj_keys]['color'] + else: + arrow_color='black' + + for i_coord in range(len(x_coords)): + ax.arrow(x_coords[i_coord], + y_coords[i_coord], + data_x[i_coord], + data_y[i_coord], + width=arrow_width, + color=arrow_color, + length_includes_head=True, + head_width=arrow_width*3, + head_length=arrow_width*3, + ) + + for line_obj_keys in overplot_options['line']: + xmin, xmax = ax.get_xbound() + ymin, ymax = ax.get_ybound() + if overplot_options['line'][line_obj_keys]['Plot']: + if 'Horizontal' in overplot_options['line'][line_obj_keys]: + h_coords=overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + ax.add_line(l) + if 'Vertical' in overplot_options['line'][line_obj_keys]: + v_coords=overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + ax.add_line(l) + if (_options['Colorbar']): cbar = plt.colorbar(img,ax=ax) if (d.data_unit.unit is not None) and (d.data_unit.unit != ''): @@ -1793,15 +2304,32 @@ def _plot(data_object, cbar.set_label(d.data_unit.name+' '+unit_name) if (xrange is not None): - ax.set_xlim(xrange[0],xrange[1]) + ax.set_xlim(xrange[0]*axes_unit_conversion[0], + xrange[1]*axes_unit_conversion[0]) + if (yrange is not None): - ax.set_ylim(yrange[0],yrange[1]) - ax.set_xlabel(ax_list[0].title(language=language)) - ax.set_ylabel(ax_list[1].title(language=language)) + ax.set_ylim(yrange[0]*axes_unit_conversion[1], + yrange[1]*axes_unit_conversion[1]) + + if axes_unit_conversion[0] == 1.: + ax.set_xlabel(ax_list[0].title(language=language)) + else: + ax.set_xlabel(ax_list[0].title(language=language, + new_unit=_options['Plot units'][axes[0]])) + + if 
axes_unit_conversion[1] == 1.: + ax.set_ylabel(ax_list[1].title(language=language)) + else: + ax.set_ylabel(ax_list[1].title(language=language, + new_unit=_options['Plot units'][axes[1]])) + ax.get_xaxis().set_visible(_options['Axes visibility'][0]) + ax.get_yaxis().set_visible(_options['Axes visibility'][1]) + if (_options['Log x']): ax.set_xscale('log') if (_options['Log y']): - ax.set_yscale('log') + ax.set_yscale('log') + title = ax.get_title() if (title is None): title = '' @@ -1820,6 +2348,10 @@ def _plot(data_object, else: title += ',...' ax.set_title(title) + + """ ----------------------------------------------- + | ANIM-IMAGE AND ANIM-CONTOUR PLOT DEFINITION | + -----------------------------------------------""" elif ((_plot_type == 'anim-image') or (_plot_type == 'anim-contour')): if (d.data is None): @@ -1864,18 +2396,6 @@ def _plot(data_object, if (len(coord_t.dimension_list) != 1): raise ValueError("Time coordinate for anim-image/anim-contour plot should be changing only along one dimension.") - try: - coord_x.dimension_list.index(coord_t.dimension_list[0]) - badx = True - except: - badx = False - try: - coord_y.dimension_list.index(coord_t.dimension_list[0]) - bady = True - except: - bady = False - if (badx or bady): - raise ValueError("X and y coordinate for anim-image plot should not change in time dimension.") index = [0] * 3 index[coord_t.dimension_list[0]] = ... 
@@ -1887,6 +2407,8 @@ def _plot(data_object, (coord_y.mode.equidistant) and (len(coord_y.dimension_list) == 1)): # This data is image-like with data points on a rectangular array image_like = True + xdata=coord_x.data(data_shape=d.data.shape)[0] + ydata=coord_y.data(data_shape=d.data.shape)[0] elif ((len(coord_x.dimension_list) == 1) and (len(coord_y.dimension_list) == 1)): if (not coord_x.isnumeric()): raise ValueError('Coordinate '+coord_x.unit.name+' is not numeric.') @@ -1911,14 +2433,19 @@ def _plot(data_object, else: image_like = False if (image_like and (_plot_type == 'anim-image')): - xdata_range = coord_x.data_range(data_shape=d.shape)[0] - ydata_range = coord_y.data_range(data_shape=d.shape)[0] + xdata_range = coord_x.data_range(data_shape=d.shape)[0] + ydata_range = coord_y.data_range(data_shape=d.shape)[0] + xdata_range = [axes_unit_conversion[0] * xdata_range[0], + axes_unit_conversion[0] * xdata_range[1]] + ydata_range = [axes_unit_conversion[1] * ydata_range[0], + axes_unit_conversion[1] * ydata_range[1]] else: index = [...]*3 - index[coord_t.dimension_list[0]] = 0 + if (len(coord_x.dimension_list) < 3 and + len(coord_y.dimension_list) < 3): + index[coord_t.dimension_list[0]] = 0 ydata = np.squeeze(coord_y.data(data_shape=d.shape,index=index)[0]) xdata = np.squeeze(coord_x.data(data_shape=d.shape,index=index)[0]) - try: cmap_obj = plt.cm.get_cmap(cmap) if (_options['Nan color'] is not None): @@ -1927,7 +2454,6 @@ def _plot(data_object, raise ValueError("Invalid color map.") gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=_plot_id.base_subplot) -# ax=plt.plot() _plot_id.plt_axis_list = [] # _plot_id.plt_axis_list.append(plt.subplot(gs[0,0])) _plot_id.plt_axis_list.append(_plot_id.base_subplot) @@ -1935,6 +2461,15 @@ def _plot(data_object, # plt.plot() # plt.cla() # ax=plt.gca() + + if (xrange is None): + xrange=[np.min(xdata),np.max(xdata)] + if (yrange is None): + yrange=[np.min(ydata),np.max(ydata)] + + if _options['Equal axes'] and not 
coord_x.unit.unit == coord_y.unit.unit: + print('Equal axis is not possible. The axes units are not equal.') + for it in range(len(tdata)): # This is a hack here. The problem is, that the colorbar() call reduces the axes size del gs @@ -1943,13 +2478,16 @@ def _plot(data_object, # plt.subplot(_plot_id.base_subplot) # ax_act = _plot_id.base_subplot ax_act = plt.subplot(gs[0,0]) + if _options['Equal axes'] and coord_x.unit.unit == coord_y.unit.unit: + #ax_act.axis('equal') + ax_act.set_aspect(1.0) time_index = [slice(0,dim) for dim in d.data.shape] time_index[coord_t.dimension_list[0]] = it time_index = tuple(time_index) if (zrange is None): - vmin = np.nanmin(d.data[time_index]) - vmax = np.nanmax(d.data[time_index]) + vmin = np.nanmin(d.data) + vmax = np.nanmax(d.data) else: vmin = zrange[0] vmax = zrange[1] @@ -1961,19 +2499,23 @@ def _plot(data_object, if (vmin <= 0): raise ValueError("z range[0] cannot be negative or zero for logarithmic scale.") norm = colors.LogNorm(vmin=vmin, vmax=vmax) - locator = ticker.LogLocator(subs='all') + #locator = ticker.LogLocator(subs='all') + ticker.LogLocator(subs='all') else: norm = None - locator = None #UNUSED + #locator = None if (contour_levels is None): contour_levels = 255 + ax_act.clear() + plt.xlim(xrange[0]*axes_unit_conversion[0], + xrange[1]*axes_unit_conversion[0]) + plt.ylim(yrange[0]*axes_unit_conversion[1], + yrange[1]*axes_unit_conversion[1]) _plot_opt = _plot_options[0] - if (image_like and (_plot_type == 'anim-image')): try: - #if (coord_x.dimension_list[0] == 0): if (coord_x.dimension_list[0] < coord_y.dimension_list[0]): im = np.clip(np.transpose(d.data[time_index]),vmin,vmax) else: @@ -1988,8 +2530,16 @@ def _plot(data_object, except Exception as e: raise e else: + ax_act.set_autoscale_on(False) if (_plot_type == 'anim-image'): - xgrid, ygrid = flap.tools.grid_to_box(xdata,ydata) + #xgrid, ygrid = flap.tools.grid_to_box(xdata*axes_unit_conversion[0], + # ydata*axes_unit_conversion[1]) + if (len(xdata.shape) 
== 3 and len(ydata.shape) == 3): + xgrid, ygrid = flap.tools.grid_to_box(xdata[time_index]*axes_unit_conversion[0], + ydata[time_index]*axes_unit_conversion[1]) + else: + xgrid, ygrid = flap.tools.grid_to_box(xdata*axes_unit_conversion[0], + ydata*axes_unit_conversion[1]) im = np.clip(np.transpose(d.data[time_index]),vmin,vmax) try: img = plt.pcolormesh(xgrid,ygrid,im,norm=norm,cmap=cmap,vmin=vmin, @@ -1999,28 +2549,118 @@ def _plot(data_object, del im else: try: + if (len(xdata.shape) == 3 and len(ydata.shape) == 3): + xgrid=xdata[time_index]*axes_unit_conversion[0] + ygrid=ydata[time_index]*axes_unit_conversion[1] + else: + xgrid=xdata*axes_unit_conversion[0] + ygrid=ydata*axes_unit_conversion[1] im = np.clip(d.data[time_index],vmin,vmax) - img = plt.contourf(xdata,ydata,im,contour_levels,norm=norm, - origin='lower',cmap=cmap,vmin=vmin,vmax=vmax,**_plot_opt) + img = plt.contourf(xgrid, + ygrid, + im, + contour_levels,norm=norm,origin='lower', + cmap=cmap,vmin=vmin,vmax=vmax,**_plot_opt) del im except Exception as e: raise e - + + if (overplot_options is not None): + ax_act.set_autoscale_on(False) + for path_obj_keys in overplot_options['path']: + if overplot_options['path'][path_obj_keys]['Plot']: + time_index = [slice(0,dim) for dim in overplot_options['path'][path_obj_keys]['data']['Data resampled'][0].shape] + time_index[overplot_options['path'][path_obj_keys]['data']['Time dimension']] = it + time_index = tuple(time_index) + im = plt.plot(overplot_options['path'][path_obj_keys]['data']['Data resampled'][0][time_index]*axes_unit_conversion[0], + overplot_options['path'][path_obj_keys]['data']['Data resampled'][1][time_index]*axes_unit_conversion[1], + color=overplot_options['path'][path_obj_keys]['Color']) + + for contour_obj_keys in overplot_options['contour']: + if overplot_options['contour'][contour_obj_keys]['Plot']: + time_index = [slice(0,dim) for dim in overplot_options['contour'][contour_obj_keys]['data']['Data resampled'].shape] +
time_index[overplot_options['contour'][contour_obj_keys]['data']['Time dimension']] = it + time_index = tuple(time_index) + im = plt.contour(overplot_options['contour'][contour_obj_keys]['data']['X coord resampled'][time_index].transpose()*axes_unit_conversion[0], + overplot_options['contour'][contour_obj_keys]['data']['Y coord resampled'][time_index].transpose()*axes_unit_conversion[1], + overplot_options['contour'][contour_obj_keys]['data']['Data resampled'][time_index], + levels=overplot_options['contour'][contour_obj_keys]['nlevel'], + cmap=overplot_options['contour'][contour_obj_keys]['Colormap']) + + + for contour_obj_keys in overplot_options['arrow']: + if overplot_options['arrow'][contour_obj_keys]['Plot']: + + time_index = [slice(0,dim) for dim in overplot_options['arrow'][contour_obj_keys]['data']['Data X'].shape] + time_index[overplot_options['arrow'][contour_obj_keys]['data']['Time dimension']] = it + time_index = tuple(time_index) + + x_coords=overplot_options['arrow'][contour_obj_keys]['data']['X coord'][time_index].flatten()*axes_unit_conversion[0] + y_coords=overplot_options['arrow'][contour_obj_keys]['data']['Y coord'][time_index].flatten()*axes_unit_conversion[1] + data_x=overplot_options['arrow'][contour_obj_keys]['data']['Data X'][time_index].flatten()*axes_unit_conversion[0] + data_y=overplot_options['arrow'][contour_obj_keys]['data']['Data Y'][time_index].flatten()*axes_unit_conversion[1] + + for i_coord in range(len(x_coords)): + plt.arrow(x_coords[i_coord], + y_coords[i_coord], + data_x[i_coord], + data_y[i_coord], + width=overplot_options['arrow'][contour_obj_keys]['width'], + color=overplot_options['arrow'][contour_obj_keys]['color'], + length_includes_head=True, + ) + + for line_obj_keys in overplot_options['line']: + xmin, xmax = ax_act.get_xbound() + ymin, ymax = ax_act.get_ybound() + if overplot_options['line'][line_obj_keys]['Plot']: + if 'Horizontal' in overplot_options['line'][line_obj_keys]: + 
h_coords=overplot_options['line'][line_obj_keys]['Horizontal'] + for segments in h_coords: + if segments[0] > ymin and segments[0] < ymax: + l = mlines.Line2D([xmin,xmax], [segments[0],segments[0]], color=segments[1]) + ax_act.add_line(l) + if 'Vertical' in overplot_options['line'][line_obj_keys]: + v_coords=overplot_options['line'][line_obj_keys]['Vertical'] + for segments in v_coords: + if segments[0] > xmin and segments[0] < xmax: + l = mlines.Line2D([segments[0],segments[0]], [ymin,ymax], color=segments[1]) + ax_act.add_line(l) + if (_options['Colorbar']): cbar = plt.colorbar(img,ax=ax_act) cbar.set_label(d.data_unit.name) - - if (xrange is not None): - plt.xlim(xrange[0],xrange[1]) - if (yrange is not None): - plt.ylim(yrange[0],yrange[1]) - plt.xlabel(ax_list[0].title(language=language)) - plt.ylabel(ax_list[1].title(language=language)) + + if axes_unit_conversion[0] == 1.: + plt.xlabel(ax_list[0].title(language=language)) + else: + plt.xlabel(ax_list[0].title(language=language, + new_unit=_options['Plot units'][axes[0]])) + + if axes_unit_conversion[1] == 1.: + plt.ylabel(ax_list[1].title(language=language)) + else: + plt.ylabel(ax_list[1].title(language=language, + new_unit=_options['Plot units'][axes[1]])) + if (_options['Log x']): plt.xscale('log') if (_options['Log y']): plt.yscale('log') - title = str(d.exp_id)+' @ '+coord_t.unit.name+'='+"{:10.5f}".format(tdata[it])+' ['+coord_t.unit.unit+']' + + if _options['Plot units'] is not None: + if axes[2] in _options['Plot units']: + time_unit=_options['Plot units'][axes[2]] + time_coeff=axes_unit_conversion[2] + else: + time_unit=coord_t.unit.unit + time_coeff=1. + else: + time_unit=coord_t.unit.unit + time_coeff=1. 
+ title = str(d.exp_id)+' @ '+coord_t.unit.name+'='+"{:10.5f}".format(tdata[it]*time_coeff)+\ + ' ['+time_unit+']' + plt.title(title) plt.show(block=False) time.sleep(_options['Waittime']) @@ -2031,7 +2671,10 @@ def _plot(data_object, # Get the RGBA buffer from the figure w,h = fig.canvas.get_width_height() buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) - buf.shape = ( h, w, 3 ) + if buf.shape[0] == h*2 * w*2 * 3: + buf.shape = ( h*2, w*2, 3 ) #ON THE MAC'S INTERNAL SCREEN, THIS COULD HAPPEN NEEDS A MORE ELEGANT FIX + else: + buf.shape = ( h, w, 3 ) buf = cv2.cvtColor(buf, cv2.COLOR_RGBA2BGR) try: video @@ -2048,6 +2691,10 @@ def _plot(data_object, cv2.destroyAllWindows() video.release() del video + + """ --------------------------------------------- + | ANIMATION PLOT DEFINITION | + ---------------------------------------------""" elif (_plot_type == 'animation'): if (d.data is None): @@ -2058,7 +2705,7 @@ def _plot(data_object, raise TypeError("Animated image plot is applicable only to real data.") # Checking for numeric type try: - d.data[0,0] += 1 + d.data[0,0]+1 except TypeError: raise TypeError("Animated image plot is applicable only to numeric data.") @@ -2066,8 +2713,6 @@ def _plot(data_object, if (yrange is not None): if ((type(yrange) is not list) or (len(yrange) != 2)): raise ValueError("Invalid Y range setting.") - if (zrange is not None) and (_options['Prevent saturation']): - d.data=np.mod(d.data,zrange[1]) # Processing axes # Although the plot will be cleared the existing plot axes will be considered default_axes = [d.coordinates[0].unit.name, d.coordinates[1].unit.name,d.coordinates[2].unit.name,'__Data__'] @@ -2134,47 +2779,40 @@ def _plot(data_object, xdata = np.squeeze(coord_x.data(data_shape=d.shape,index=index)[0]) else: index = [...]*3 - index[coord_t.dimension_list[0]] = 0 + if (len(coord_x.dimension_list) < 3 and + len(coord_y.dimension_list) < 3): + index[coord_t.dimension_list[0]] = 0 xdata_range = None ydata_range = None 
ydata = np.squeeze(coord_y.data(data_shape=d.shape,index=index)[0]) xdata = np.squeeze(coord_x.data(data_shape=d.shape,index=index)[0]) - try: cmap_obj = plt.cm.get_cmap(cmap) if (_options['Nan color'] is not None): cmap_obj.set_bad(_options['Nan color']) except ValueError: raise ValueError("Invalid color map.") - gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=_plot_id.base_subplot) _plot_id.plt_axis_list = [] _plot_id.plt_axis_list.append(plt.subplot(gs[0,0])) - oargs=(ax_list, axes, d, xdata, ydata, tdata, xdata_range, ydata_range, + oargs=(ax_list, axes, axes_unit_conversion, d, xdata, ydata, tdata, xdata_range, ydata_range, cmap_obj, contour_levels, - coord_t, coord_x, coord_y, cmap, _options, + coord_t, coord_x, coord_y, cmap, _options, overplot_options, xrange, yrange, zrange, image_like, _plot_options, language, _plot_id, gs) anim = PlotAnimation(*oargs) anim.animate() - -# print("------ plot finished, show() ----") - plt.show(block=False) - -# if (_options['Clear']): -# _plot_id.number_of_plots = 0 -# _plot_id.plot_data = [] -# _plot_id.plt_axis_list = None -# _plot_id.number_of_plots += 1 + plt.show(block=False) _plot_id.axes = ax_list _plot_id.plot_data.append(pdd_list) #Setting this one the default plot ID _plot_id.options.append(_options) _plot_id.plot_type = _plot_type set_plot_id(_plot_id) - + if _options['Prevent saturation']: + del d return _plot_id diff --git a/flap/spectral_analysis.py b/flap/spectral_analysis.py index b53d532..ba518ab 100644 --- a/flap/spectral_analysis.py +++ b/flap/spectral_analysis.py @@ -170,10 +170,11 @@ def _spectral_calc_interval_selection(d, ref, coordinate,intervals,interval_n): proc_interval_index_len = int(round(proc_interval_len / step)) - 2 proc_interval_index_start = np.round((proc_interval_end - coord.start) / coord.step[0]).astype(np.int32) + 1 proc_interval_index_end = proc_interval_index_start + proc_interval_index_len + return flap.coordinate.Intervals(proc_interval_start, proc_interval_end), \ 
flap.coordinate.Intervals(proc_interval_index_start, proc_interval_index_end), -def trend_removal_func(d,ax, trend, x=None, return_trend=False, return_poly=False): +def trend_removal_func(d, ax, trend, x=None, return_trend=False, return_poly=False): """ This function makes the _trend_removal internal function public """ return _trend_removal(d, ax, trend, x=x, return_trend=return_trend, return_poly=return_poly) @@ -462,7 +463,7 @@ def _apsd(d, coordinate=None, intervals=None, options=None): except Exception as e: raise e if (hanning): - data_proc *= hanning_window + data_proc = data_proc * hanning_window # Calculating APS on natural resolution, full frequency scale dfft = np.fft.fft(data_proc,axis=proc_dim) dps = (dfft.conjugate() * dfft).real @@ -972,14 +973,15 @@ def _cpsd(d, ref=None, coordinate=None, intervals=None, options=None): if (hanning): hanning_window = np.hanning(index_int_high[0] - index_int_low[0] + 1) hanning_window /= math.sqrt(3./8) + hanning_window_ref=hanning_window if (len(d.shape) > 1): han_sh = [1] * len(d.shape) han_sh[proc_dim] = hanning_window.size hanning_window = hanning_window.reshape(han_sh) if (len(_ref.shape) > 1): han_sh = [1] * len(_ref.shape) - han_sh[proc_dim_ref] = hanning_window.size - hanning_window_ref = hanning_window.reshape(han_sh) + han_sh[proc_dim_ref] = hanning_window_ref.size + hanning_window_ref = hanning_window_ref.reshape(han_sh) # We need to determine a shape to which the out_data_num array will be broadcasted to # allow dividing all spectra. 
bs is this shape @@ -1016,8 +1018,8 @@ def _cpsd(d, ref=None, coordinate=None, intervals=None, options=None): except Exception as e: raise e if (hanning): - data_proc *= hanning_window.astype(data_proc.dtype) - data_proc_ref *= hanning_window_ref.astype(data_proc_ref.dtype) + data_proc = (data_proc * hanning_window).astype(data_proc.dtype) + data_proc_ref = (data_proc_ref * hanning_window_ref).astype(data_proc_ref.dtype) # Calculating FFT dfft = np.fft.fft(data_proc,axis=proc_dim) dfft_ref = np.fft.fft(data_proc_ref,axis=proc_dim_ref) @@ -1251,8 +1253,12 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): ['Poly', n]: Fit an n order polynomial to the data and subtract. Trend removal will be applied to each interval separately. At present trend removal can be applied to 1D CCF only. - 'Normalize': Normalize with autocorrelations, that is calculate correlation instead of + 'Normalize': Normalize with autocorrelations, that is to calculate correlation instead of covariance. + 'Correct ACF peak': Switch to correct the photon peak of the ACF during normalization. + Default: False + 'Correct ACF range': Indices for the ACF peak correction around the peak. 
+ Default: [-2,-1,1,2] 'Verbose': Print progress messages """ if (d.data is None): @@ -1264,6 +1270,8 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): 'Trend removal': ['Poly', 2], 'Error calculation': True, 'Normalize': False, + 'Correct ACF peak': False, + 'Correct ACF range':[-2,-1,1,2], 'Verbose':True } _options = flap.config.merge_options(default_options, options, data_source=d.data_source, section='Correlation') @@ -1283,8 +1291,16 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): trend = _options['Trend removal'] if (len(_coordinate) != 1 and (trend is not None)): raise NotImplementedError("Trend removal for multi-dimensional correlation is not implemented.") + interval_n = _options['Interval_n'] + norm = _options['Normalize'] + + correct_acf_peak=_options['Correct ACF peak'] + correct_acf_range=np.asarray(_options['Correct ACF range']) + if correct_acf_range.shape[0] < 4: + raise ValueError("The range of the correlation peak subtraction should have at least 4 elements, e.g., [-2,-1,1,2]") + error_calc = _options['Error calculation'] if (len(_coordinate) != 1 and (intervals is not None)): raise NotImplementedError("Interval selection for multi-dimensional correlation is not implemented.") @@ -1413,7 +1429,7 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): # Sanity check for lag range for i,coord in enumerate(coord_obj): dr, dr1 = coord.data_range(data_shape=d.shape) - if ((abs(corr_range[i][0]) > (int_high[0] - int_low[0])) or (abs(corr_range[i][1]) > (int_high[0] - int_low[0]))): + if ((abs(corr_range[i][0]) > (dr[1] - dr[0])) or (abs(corr_range[i][1]) > (dr[1] - dr[0]))): raise ValueError("Correlation lag calculation range is too large for coordinate '{:s}'.".format(coord.unit.name)) if ((corr_range[i][1] - corr_range[i][0]) < coord.step[0]*3): raise ValueError("Correlation lag calculation range is too small for coordinate '{:s}'.".format(coord.unit.name)) @@ -1463,6 +1479,7 @@ def 
_ccf(d, ref=None, coordinate=None, intervals=None, options=None): correlation_dimensions_out = [] out_shape_acf = [] out_shape_acf_ref = [] + for i in range(len(d.data.shape)): try: correlation_dimensions.index(i) @@ -1518,7 +1535,7 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): # zero_ind is the index where the 0 time lag will be after rearranging the CCF to final form zero_ind = [0] * len(correlation_dimensions) # A slicing list which will cut out the needed part of the CCF - # The CCF will have first the remaining dimensions of d, then the correlation dimensions, + # The CCF will have the remaining dimensions of d first, then the correlation dimensions, # then the remaining dimensions of ref ind_slice = [slice(0,d) for d in out_shape] ind_bin = copy.deepcopy(ind_slice) @@ -1532,11 +1549,14 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): nfft = corr_point_n_nat[i] + pad_length[i] ind_in1[cd] = slice(0,int(nfft / 2)) ind_out1[cd] = slice(nfft-int(nfft / 2), nfft) + zero_ind[i] = nfft - int(nfft / 2) ind_in2[cd] = slice(int(nfft / 2),nfft) ind_out2[cd] = slice(0, nfft - int(nfft / 2)) + ind_slice[cd] = slice(shift_range[i][0] + zero_ind[i], shift_range[i][1] + zero_ind[i]+1) ind_bin[cd] = np.arange(ind_slice[cd].stop - ind_slice[cd].start,dtype=np.int32) // corr_res_sample[i] + if (calc_acf): ind_in1_acf[cd] = ind_in1[cd] ind_out1_acf[cd] = ind_out1[cd] @@ -1606,12 +1626,30 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): res = np.fft.ifftn(res,axes=cps_corr_dims) if (out_dtype is float): res = np.real(res) - corr = np.empty(res.shape,dtype=res.dtype) - corr[tuple(ind_out1)] = res[tuple(ind_in1)] - corr[tuple(ind_out2)] = res[tuple(ind_in2)] + + if len(correlation_dimensions) == 2: + # This part of the code rearranges the corners of the 2D CCF function + # so the maximum is going to be in the middle. It does this in a generalized way + # where the non-correlating dimensions are left alone. 
+ + corr=flap.tools.reorder_2d_ccf_indices(res, + correlation_dimensions, + ind_in1, + ind_in2, + ind_out1, + ind_out2) + + else: + corr = np.empty(res.shape,dtype=res.dtype) + corr[tuple(ind_out1)] = res[tuple(ind_in1)] + corr[tuple(ind_out2)] = res[tuple(ind_in2)] + corr_sliced = corr[tuple(ind_slice)] corr_binned = np.zeros(tuple(out_shape),dtype=res.dtype) - np.add.at(corr_binned,tuple(ind_bin),corr_sliced) + if corr_binned.shape != corr_sliced.shape: + np.add.at(corr_binned,tuple(ind_bin),corr_sliced) + else: + corr_binned=corr_sliced if (norm): zero_ind_out = [0] * len(correlation_dimensions) for i in range(len(correlation_dimensions)): @@ -1639,44 +1677,241 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): = copy.deepcopy(ind_autocorr[0:len(autocorr_index_shape)]) autocorr_mx = corr_binned[tuple(ind_autocorr)] extend_shape = [1] * (len(out_corr.shape) - len(autocorr_mx.shape)) - corr_binned /= np.sqrt(np.reshape(autocorr_mx,tuple(list(autocorr_mx.shape) + extend_shape))) + corr_binned /= np.sqrt(np.reshape(autocorr_mx, tuple(list(autocorr_mx.shape) + extend_shape))) corr_binned /= np.sqrt(np.reshape(autocorr_mx, tuple(extend_shape + list(autocorr_mx.shape)))) else: # We do not have the autocorrelations, calculating res_acf = np.fft.ifftn(fft * np.conj(fft),axes=correlation_dimensions) + if (out_dtype is float): res_acf = np.real(res_acf) # we need to move the correlation axes to the end to be consistent with the CCF res_acf, ax_map = flap.tools.move_axes_to_end(res_acf,correlation_dimensions) - corr_acf = np.empty(res_acf.shape,dtype=res.dtype) - corr_acf[tuple(ind_out1_acf)] = res_acf[tuple(ind_in1_acf)] - corr_acf[tuple(ind_out2_acf)] = res_acf[tuple(ind_in2_acf)] + + if len(correlation_dimensions) == 2: + # This part of the code rearranges the corners of the 2D CCF function + # so the maximum is going to be in the middle. It does this in a generalized way + # where the non-correlating dimensions are left alone.
+ + corr_acf=flap.tools.reorder_2d_ccf_indices(res_acf, + correlation_dimensions, + ind_in1_acf, + ind_in2_acf, + ind_out1_acf, + ind_out2_acf) + else: + corr_acf = np.empty(res_acf.shape,dtype=res.dtype) + corr_acf[tuple(ind_out1_acf)] = res_acf[tuple(ind_in1_acf)] + corr_acf[tuple(ind_out2_acf)] = res_acf[tuple(ind_in2_acf)] + corr_sliced_acf = corr_acf[tuple(ind_slice_acf)] corr_binned_acf = np.zeros(tuple(out_shape_acf),dtype=res_acf.dtype) - np.add.at(corr_binned_acf,tuple(ind_bin_acf),corr_sliced_acf) + if not corr_binned_acf.shape == corr_sliced_acf.shape: + np.add.at(corr_binned_acf,tuple(ind_bin_acf),corr_sliced_acf) + else: + corr_binned_acf=corr_sliced_acf # We need to take the zero lag elements from each correlation dimension corr_dimension_start = len(out_shape_acf)-len(correlation_dimensions) + if correct_acf_peak: + #if False: + + if corr_dimension_start == 0: #Every dimension is correlation with a lag dimension + array_2b_corrected=copy.deepcopy(corr_binned_acf) + peak_value=0. + for i in range(len(correlation_dimensions)): + if zero_ind_out[i]-2 < 0: + raise ValueError("Not enough datapoints for ACF correction.") + # ind_correction = np.arange(zero_ind_out[i]-2,zero_ind_out[i]+3) + ind_correction=correct_acf_range+zero_ind_out[i] + array_2b_corrected = np.take(array_2b_corrected,ind_correction,axis=corr_dimension_start+i) + + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # np.take(array_2b_corrected, 2, axis=corr_dimension_start+i)[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + np.take(array_2b_corrected, 2, axis=corr_dimension_start+i), + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf[zero_ind_out]=peak_value + + elif corr_dimension_start == 1: #The first dimension is not correlation, but the rest is. + for i_non_corr in range(corr_binned_acf.shape[0]): + array_2b_corrected=np.take(corr_binned_acf,i_non_corr,axis=0) + peak_value=0. 
+ for i in range(len(correlation_dimensions)): + if zero_ind_out[i]-2 < 0: + raise ValueError("Not enough datapoints for ACF correction.") + # ind_correction = np.arange(zero_ind_out[i]-2,zero_ind_out[i]+3) + ind_correction=correct_acf_range+zero_ind_out[i] + array_2b_corrected = np.take(array_2b_corrected,ind_correction,axis=i) + if len(correlation_dimensions) == 1: + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # array_2b_corrected[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + array_2b_corrected, + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf[i_non_corr,zero_ind_out]=peak_value + elif len(correlation_dimensions) == 2: + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # np.take(array_2b_corrected, 2, axis=i)[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + np.take(array_2b_corrected, 2, axis=i), + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf[i_non_corr,zero_ind_out]=peak_value + elif len(correlation_dimensions) == 3: + + raise NotImplementedError('3D correlation dimensions has not been implemented yet.') + + elif corr_dimension_start == 2: + for i_non_corr_1 in range(corr_binned_acf.shape[0]): + for i_non_corr_2 in range(corr_binned_acf.shape[1]): + array_2b_corrected=np.take(corr_binned_acf,i_non_corr_1,axis=0) + array_2b_corrected=np.take(array_2b_corrected,i_non_corr_2,axis=0) + peak_value=0. 
+ for i in range(len(correlation_dimensions)): + if zero_ind_out[i]-2 < 0: + raise ValueError("Not enough datapoints for ACF correction.") + #ind_correction = np.arange(zero_ind_out[i]-2,zero_ind_out[i]+3) + ind_correction=correct_acf_range+zero_ind_out[i] + array_2b_corrected = np.take(array_2b_corrected,ind_correction,axis=i) + + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # np.take(array_2b_corrected, 2, axis=i)[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + np.take(array_2b_corrected, 2, axis=i), + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf[i_non_corr_1,i_non_corr_2,zero_ind_out]=peak_value + + elif corr_dimension_start == 3: + for i_non_corr_1 in range(corr_binned_acf.shape[0]): + for i_non_corr_2 in range(corr_binned_acf.shape[1]): + for i_non_corr_3 in range(corr_binned_acf.shape[2]): + + array_2b_corrected=np.take(corr_binned_acf,i_non_corr_1,axis=0) + array_2b_corrected=np.take(array_2b_corrected,i_non_corr_2,axis=0) + array_2b_corrected=np.take(array_2b_corrected,i_non_corr_3,axis=0) + peak_value=0. 
+ for i in range(len(correlation_dimensions)): + if zero_ind_out[i]-2 < 0: + raise ValueError("Not enough datapoints for ACF correction.") + #ind_correction = np.arange(zero_ind_out[i]-2,zero_ind_out[i]+3) + ind_correction=correct_acf_range+zero_ind_out[i] + array_2b_corrected = np.take(array_2b_corrected,ind_correction,axis=i) + + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # np.take(array_2b_corrected, 2, axis=i)[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + np.take(array_2b_corrected, 2, axis=i), + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf[i_non_corr_1,i_non_corr_2,i_non_corr_3,zero_ind_out]=peak_value + + + # We need to take the zero lag elements from each correlation dimension for i in range(len(correlation_dimensions)): - # Always the cor_dimension_start axis is taken as it is removed by take + # Always the cor_dimension_start axis is taken as it is removed by take corr_binned_acf = np.take(corr_binned_acf,zero_ind_out[i],axis=corr_dimension_start) + if (ref is not None): res_acf_ref = np.fft.ifftn(fft_ref * np.conj(fft_ref),axes=correlation_dimensions_ref) if (out_dtype is float): res_acf_ref = np.real(res_acf_ref) # we need to move the correlation axes to the start to be consistent with the CCF - res_acf_ref,ax_map_ref = flap.tools.move_axes_to_start(res_acf_ref,correlation_dimensions_ref) - corr_acf_ref = np.empty(res_acf_ref.shape,dtype=res_acf.dtype) - corr_acf_ref[tuple(ind_out1_acf_ref)] = res_acf_ref[tuple(ind_in1_acf_ref)] - corr_acf_ref[tuple(ind_out2_acf_ref)] = res_acf_ref[tuple(ind_in2_acf_ref)] + if len(_coordinate) != 2: + res_acf_ref,ax_map_ref = flap.tools.move_axes_to_end(res_acf_ref,correlation_dimensions_ref) + + + if len(correlation_dimensions) == 2: + corr_acf_ref=flap.tools.reorder_2d_ccf_indices(res_acf_ref, + correlation_dimensions, + ind_in1_acf_ref, + ind_in2_acf_ref, + ind_out1_acf_ref, + ind_out2_acf_ref) + else: 
+ corr_acf_ref = np.empty(res_acf_ref.shape,dtype=res_acf.dtype) + corr_acf_ref[tuple(ind_out1_acf_ref)] = res_acf_ref[tuple(ind_in1_acf_ref)] + corr_acf_ref[tuple(ind_out2_acf_ref)] = res_acf_ref[tuple(ind_in2_acf_ref)] + corr_sliced_acf_ref = corr_acf_ref[tuple(ind_slice_acf_ref)] corr_binned_acf_ref = np.zeros(tuple(out_shape_acf_ref),dtype=res_acf_ref.dtype) - np.add.at(corr_binned_acf_ref,tuple(ind_bin_acf_ref),corr_sliced_acf_ref) + + if not corr_binned_acf_ref.shape == corr_sliced_acf_ref.shape: + np.add.at(corr_binned_acf_ref,tuple(ind_bin_acf_ref),corr_sliced_acf_ref) + else: + corr_binned_acf_ref=corr_sliced_acf_ref + + if correct_acf_peak: + + array_2b_corrected=copy.deepcopy(corr_binned_acf_ref) + + peak_value=0. + for i in range(len(correlation_dimensions_ref)): + if zero_ind_out[i]-2 < 0: + raise ValueError("Not enough datapoints for ACF correction.") + #ind_correction = np.arange(zero_ind_out[i]-2,zero_ind_out[i]+3) + ind_correction=correct_acf_range+zero_ind_out[i] + array_2b_corrected = np.take(array_2b_corrected,ind_correction,axis=i) + if len(correlation_dimensions) == 1: + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # array_2b_corrected[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + array_2b_corrected, + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf_ref[zero_ind_out]=peak_value + elif len(correlation_dimensions) == 2: + for i in range(len(correlation_dimensions)): + # coeff = np.polyfit([-2,-1,1,2], + # np.take(array_2b_corrected, 2, axis=i)[np.asarray([0,1,3,4])], + # 2) + coeff = np.polyfit(correct_acf_range, + np.take(array_2b_corrected, 2, axis=i), + 2) + + peak_value += coeff[2]-coeff[1]**2/(4*coeff[0]) + peak_value /= (i+1) + corr_binned_acf_ref[zero_ind_out]=peak_value + elif len(correlation_dimensions) == 3: + + raise NotImplementedError('3D correlation dimensions has not been implemented yet.') + + # Always the 
cor_dimension_start axis is taken as it is removed by take # We need to take the zero lag elements from each correlation dimension for i in range(len(correlation_dimensions)): - # Always the cor_dimension_start axis is taken as it is removed by take corr_binned_acf_ref = np.take(corr_binned_acf_ref,zero_ind_out[i],axis=0) + + # for i in range(len(correlation_dimensions)): + # corr_binned_acf_ref = np.take(corr_binned_acf_ref,zero_ind_out[i],axis=0) else: corr_binned_acf_ref = corr_binned_acf + extend_shape = [1] * (len(out_corr.shape) - len(corr_binned_acf.shape)) corr_binned /= np.sqrt(np.reshape(corr_binned_acf,tuple(list(corr_binned_acf.shape) + extend_shape))) extend_shape = [1] * (len(out_corr.shape) - len(corr_binned_acf_ref.shape)) @@ -1737,8 +1972,7 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): start = range_sampout[i][0] * corr_res_sample[i] * c.step[0], step = [corr_res_sample[i] * c.step[0]], dimension_list=[len(d.data.shape) - len(correlation_dimensions) + i]) - coord_list.append(c_new) - + coord_list.append(c_new) if (norm): unit_name = 'Correlation' else: @@ -1757,4 +1991,4 @@ def _ccf(d, ref=None, coordinate=None, intervals=None, options=None): data_unit = flap.coordinate.Unit(unit_name) ) - return d_out + return d_out \ No newline at end of file diff --git a/flap/tools.py b/flap/tools.py index ede137a..c470f0d 100644 --- a/flap/tools.py +++ b/flap/tools.py @@ -3,6 +3,7 @@ Created on Wed Jan 23 13:18:25 2019 @author: Zoletnik +@coauthor: Lampert Tools for the FLAP module @@ -163,26 +164,28 @@ def submatrix_index(mx_shape, index): """ index_arrays = [] - mx_dim = len(mx_shape) - for i in range(mx_dim): - # Creating a matrix with 1 element in each direction and the - # number of elements in index[i] in the i-th dimension - shape = [1] * mx_dim - shape[i] = index[i].size - # Creating this matrix - ind = np.zeros(tuple(shape),dtype=int) - # Creating a list of indices with 0 at all places except at i where '...' 
- ind_ind = [0] * mx_dim - ind_ind[i] = ... - ind[tuple(ind_ind)] = index[i] - # Expanding this matrix in all other dimensions - for j in range(mx_dim): - if (j != i): - ind = np.repeat(ind,index[j].size,j) - index_arrays.append(ind) - -# for i in range(len(mx_shape)): #THIS IS A SOLUTION FOR LARGE MATRICES, BUT NOT COMMITED -# index_arrays.append(slice(min(index[i]),max(index[i])+1)) #DUE TO BEING UNTESTED. NEEDS TO BE UNCOMMENTED IF ONE WANTS TO USE IT + +# mx_dim = len(mx_shape) +# for i in range(mx_dim): +# # Creating a matrix with 1 element in each direction and the +# # number of elements in index[i] in the i-th dimension +# shape = [1] * mx_dim +# shape[i] = index[i].size +# # Creating this matrix +# ind = np.zeros(tuple(shape),dtype=int) +# # Creating a list of indices with 0 at all places except at i where '...' +# ind_ind = [0] * mx_dim +# ind_ind[i] = ... +# ind[tuple(ind_ind)] = index[i] +# # Expanding this matrix in all other dimensions +# for j in range(mx_dim): +# if (j != i): +# ind = np.repeat(ind,index[j].size,j) +# index_arrays.append(ind) + + for i in range(len(mx_shape)): #THIS IS A SOLUTION FOR LARGE MATRICES, BUT NOT COMMITED + index_arrays.append(slice(min(index[i]),max(index[i])+1)) + return tuple(index_arrays) @@ -430,6 +433,16 @@ def grid_to_box(xdata,ydata): return xbox,ybox def time_unit_translation(time_unit=None,max_value=None): + + """ + This tool returns the numerical pre-fix of the time_unit. It is almost + obsolate due to the existance of the methos named unit_conversion, but + this script can provide backwards conversion (used by flap_mdsplus.py) + time_unit: can be a string e.g. ms or a number e.g. 1e-3 + max_value: returns a suspected time unit based on this input + e.g.: max_value=1000. 
+ """ + if (str(type(time_unit)) == 'str' or str(type(time_unit)) == ""): _time_unit=time_unit.lower() @@ -440,7 +453,7 @@ def time_unit_translation(time_unit=None,max_value=None): if VERBOSE: print('Time unit: \''+str(_time_unit)+'\'') print('Time unit translation based on values only works for shots under 1000s.') - value_translation=[[1,1e3,1e6,1e9,1e12], + value_translation=[[0.,1e3,1e6,1e9,1e12], ['s','ms','us','ns','ps']] for i in range(len(value_translation[0])-1): if (max_value > value_translation[0][i] and max_value < value_translation[0][i+1]): @@ -476,30 +489,6 @@ def time_unit_translation(time_unit=None,max_value=None): print(_time_unit+' was not found in the translation. Returning 1.') return 1 -def spatial_unit_translation(spatial_unit=None): - _spatial_unit=spatial_unit.lower() - translation={'meters':1, - 'meter':1, - 'm':1, - 'millimeters':1e-3, - 'millimeter':1e-3, - 'mm':1e-3, - 'micrometers':1e-6, - 'micrometer':1e-6, - 'um':1e-6, - 'nanometers':1e-9, - 'nanometer':1e-9, - 'nm':1e-9, - 'picometers':1e-12, - 'picometer':1e-12, - 'pm':1e-12, - } - if (_spatial_unit in translation.keys()): - return translation[_spatial_unit] - else: - print(_spatial_unit+' was not found in the translation. Returning 1.') - return 1 - def unit_conversion(original_unit=None, new_unit=None ): @@ -541,7 +530,7 @@ def unit_conversion(original_unit=None, new_unit_translation=known_conversions_full[keys_full] if original_unit_translation is None: - if len(original_unit) == 1 or len(original_unit) > 3 : # SI units are longer than 3 if using the full name + if ((len(original_unit) == 1) or (len(original_unit) > 3)): # SI units are longer than 3 if using the full name original_unit_translation=1. 
else: for keys_short in known_conversions_short: @@ -549,7 +538,7 @@ def unit_conversion(original_unit=None, original_unit_translation=known_conversions_short[keys_short] if new_unit_translation is None: - if len(new_unit) == 1 or len(new_unit) > 3: + if ((len(new_unit) == 1) or (len(new_unit) > 3)): new_unit_translation=1. else: for keys_short in known_conversions_short: @@ -557,44 +546,138 @@ def unit_conversion(original_unit=None, new_unit_translation=known_conversions_short[keys_short] if original_unit_translation is None: - print('Unit translation cannot be done for the original unit. Returning 1.') + print('Unit translation cannot be done for the original unit, '+original_unit+'. Returning 1.') if VERBOSE: if len(original_unit) > 3: print('Known conversion units are:') print(known_conversions_full) + print('\n') else: print('Known conversion units are:') print(known_conversions_short) + print('\n') original_unit_translation=1. if new_unit_translation is None: - print('Unit translation cannot be done for the new unit. Returning 1.') + print('Unit translation cannot be done for the new unit, '+new_unit+'. Returning 1.') if VERBOSE: if len(original_unit) > 3: print('Known conversion units are:') print(known_conversions_full) + print('\n') else: print('Known conversion units are:') print(known_conversions_short) + print('\n') new_unit_translation=1. return original_unit_translation/new_unit_translation - + +def polyfit_2D(x=None, + y=None, + values=None, + sigma=None, + order=None, + irregular=False, + return_covariance=False, + return_fit=False): + + if sigma is None: + sigma=np.zeros(values.shape) + sigma[:]=1. 
+ else: + if sigma.shape != values.shape: + raise ValueError('The shape of the errors do not match the shape of the values!') + if not irregular: + if len(values.shape) != 2: + raise ValueError('Values are not 2D') + if x is not None and y is not None: + if x.shape != values.shape or y.shape != values.shape: + raise ValueError('There should be as many points as values and their shape should match.') + if order is None: + raise ValueError('The order is not set.') + if (x is None and y is not None) or (x is not None and y is None): + raise ValueError('Either both or neither x and y need to be set.') + if x is None and y is None: + polynom=np.asarray([[i**k * j**l / sigma[i,j] for k in range(order+1) for l in range(order-k+1)] for i in range(values.shape[0]) for j in range(values.shape[1])]) #The actual polynomial calculation + else: + polynom=np.asarray([[x[i,j]**k * y[i,j]**l / sigma[i,j] for k in range(order+1) for l in range(order-k+1)] for i in range(values.shape[0]) for j in range(values.shape[1])]) #The actual polynomial calculation + + original_shape=values.shape + values_reshape=np.reshape(values/sigma, values.shape[0]*values.shape[1]) + covariance_matrix=np.linalg.inv(np.dot(polynom.T,polynom)) - - -#import matplotlib.pyplot as plt - -#plt.clf() -#ydata, xdata = np.meshgrid(np.arange(10),np.arange(20)) -#xdata = xdata.astype(float) -#ydata = ydata.astype(float) -#xdata += ydata*0.1 -#ydata += xdata*0.2 -#xbox, ybox = grid_to_box(xdata,ydata) -#data = (xdata + ydata) -#plt.pcolormesh(xbox,ybox,np.transpose(data),cmap='Greys') -#plt.scatter(xdata.flatten(), ydata.flatten()) + coefficients=np.dot(np.dot(covariance_matrix,polynom.T),values_reshape) #This performs the linear regression + + if not return_fit: + if return_covariance: + return (coefficients, covariance_matrix) + else: + return coefficients + else: + return np.reshape(np.dot(polynom,coefficients),original_shape) + else: + if x.shape != y.shape or x.shape != values.shape: + raise ValueError('The points 
should be an [n,2] vector.') + if len(x.shape) != 1 or len(y.shape) != 1 or len(values_reshape.shape) != 1: + raise ValueError('x,y,values should be a 1D vector when irregular is set.') + if order is None: + raise ValueError('The order is not set.') + polynom=np.asarray([[x[i]**k * y[i]**l for k in range(order+1) for l in range(order-k+1)] for i in range(values.shape[0])]) #The actual polynomial calculation + if not return_fit: + return np.dot(np.dot(np.linalg.inv(np.dot(polynom.T,polynom)),polynom.T),values) #This performs the linear regression + else: + return np.dot(polynom,np.dot(np.dot(np.linalg.inv(np.dot(polynom.T,polynom)),polynom.T),values)) + +def reorder_2d_ccf_indices(res, #Original result of the ccf calculation from the fft + cd, #Correlation dimensions + ind_in1, #Input indices 1 from 1D + ind_in2, #Input indices 2 from 1D + ind_out1, #Output indices 1 from 1D + ind_out2): #Output indices 2 from 1D + """ + This helper function reorganizes the 2D cross-correlation or auto-correlation + functions in order to have the maximum in the middle of the matrix and + not somewhere around the corner. 
Used in _ccf in spectral_analysis.py + """ + + + corr = np.empty(res.shape,dtype=res.dtype) + + ind_out_1=ind_out1[:] + ind_out_2=ind_out1[:] + ind_out_3=ind_out1[:] + ind_out_4=ind_out1[:] + + ind_out_1[cd[0]]=ind_out1[cd[0]] + ind_out_1[cd[1]]=ind_out1[cd[1]] + ind_out_2[cd[0]]=ind_out2[cd[0]] + ind_out_2[cd[1]]=ind_out1[cd[1]] + ind_out_3[cd[0]]=ind_out2[cd[0]] + ind_out_3[cd[1]]=ind_out2[cd[1]] + ind_out_4[cd[0]]=ind_out1[cd[0]] + ind_out_4[cd[1]]=ind_out2[cd[1]] + + ind_in_1=ind_in1[:] + ind_in_2=ind_in1[:] + ind_in_3=ind_in1[:] + ind_in_4=ind_in1[:] + + ind_in_1[cd[0]]=ind_in1[cd[0]] + ind_in_1[cd[1]]=ind_in1[cd[1]] + ind_in_2[cd[0]]=ind_in2[cd[0]] + ind_in_2[cd[1]]=ind_in1[cd[1]] + ind_in_3[cd[0]]=ind_in2[cd[0]] + ind_in_3[cd[1]]=ind_in2[cd[1]] + ind_in_4[cd[0]]=ind_in1[cd[0]] + ind_in_4[cd[1]]=ind_in2[cd[1]] + + corr[tuple(ind_out_1)] = res[tuple(ind_in_1)] + corr[tuple(ind_out_2)] = res[tuple(ind_in_2)] + corr[tuple(ind_out_3)] = res[tuple(ind_in_3)] + corr[tuple(ind_out_4)] = res[tuple(ind_in_4)] + + return corr \ No newline at end of file