diff --git a/pycqed/analysis/fitting_models.py b/pycqed/analysis/fitting_models.py
index d6e238b9a..e884e1bcf 100644
--- a/pycqed/analysis/fitting_models.py
+++ b/pycqed/analysis/fitting_models.py
@@ -1313,6 +1313,7 @@ def sum_int(x, y):
 CosModel2 = lmfit.Model(CosFunc2)
 ResonatorArch = lmfit.Model(resonator_flux)
 ExpDecayModel = lmfit.Model(ExpDecayFunc)
+DoubleExpDecayModel = lmfit.Model(DoubleExpDecayFunc)
 TripleExpDecayModel = lmfit.Model(TripleExpDecayFunc)
 ExpDecayModel.guess = exp_dec_guess  # todo: fix
 ExpDampOscModel = lmfit.Model(ExpDampOscFunc)
diff --git a/pycqed/analysis/measurement_analysis.py b/pycqed/analysis/measurement_analysis.py
index 4a5793d15..b123034dd 100644
--- a/pycqed/analysis/measurement_analysis.py
+++ b/pycqed/analysis/measurement_analysis.py
@@ -1099,7 +1099,7 @@ class TD_Analysis(MeasurementAnalysis):
     def __init__(self, NoCalPoints=4, center_point=31, make_fig=True,
                  zero_coord=None, one_coord=None, cal_points=None,
                  rotate_and_normalize=True, plot_cal_points=True,
-                 for_ef=False, qb_name=None, **kw):
+                 for_ef=False, qb_name=None, fit_double_exp=False, **kw):
         kw['cal_points'] = cal_points
         self.NoCalPoints = NoCalPoints
         self.normalized_values = []
@@ -1113,6 +1113,7 @@ def __init__(self, NoCalPoints=4, center_point=31, make_fig=True,
         self.center_point = center_point
         self.plot_cal_points = plot_cal_points
         self.for_ef = for_ef
+        self.fit_double_exp = fit_double_exp

         # Always call parent class constructor before assigning attributes.
         super(TD_Analysis, self).__init__(qb_name=qb_name, **kw)
@@ -4664,27 +4665,62 @@ def __init__(self, label='T1', **kw):

     def fit_T1(self, **kw):
-        # Guess for params
-        fit_mods.ExpDecayModel.set_param_hint('amplitude',
-                                              value=1,
-                                              min=0,
-                                              max=2)
-        fit_mods.ExpDecayModel.set_param_hint('tau',
-                                              value=self.sweep_points[1] * 50,
-                                              min=self.sweep_points[1],
-                                              max=self.sweep_points[-1] * 1000)
-        fit_mods.ExpDecayModel.set_param_hint('offset',
-                                              value=0,
-                                              vary=False)
-        fit_mods.ExpDecayModel.set_param_hint('n',
-                                              value=1,
-                                              vary=False)
-        self.params = fit_mods.ExpDecayModel.make_params()
+        if self.fit_double_exp:
+            # Guess for params
+            # Reuse the double-exponential model registered in fitting_models
+            DoubleExpDecayModel = fit_mods.DoubleExpDecayModel
+
+            DoubleExpDecayModel.set_param_hint('amp1',
+                                               value=0.5,
+                                               min=0,
+                                               max=1)
+            DoubleExpDecayModel.set_param_hint('amp2',
+                                               value=0.5,
+                                               min=0,
+                                               max=1)
+            DoubleExpDecayModel.set_param_hint('tau1',
+                                               value=self.sweep_points[1] * 50,
+                                               min=self.sweep_points[1],
+                                               max=self.sweep_points[-1] * 1000)
+            DoubleExpDecayModel.set_param_hint('tau2',
+                                               value=self.sweep_points[3] * 50,
+                                               min=self.sweep_points[1],
+                                               max=self.sweep_points[-1] * 1000)
+            DoubleExpDecayModel.set_param_hint('offset',
+                                               value=0,
+                                               vary=False)
+            DoubleExpDecayModel.set_param_hint('n',
+                                               value=1,
+                                               vary=False)
+            self.params = DoubleExpDecayModel.make_params()
+
+            fit_res = DoubleExpDecayModel.fit(data=self.normalized_data_points,
+                                              t=self.sweep_points[:-
+                                                                  self.NoCalPoints],
+                                              params=self.params)

-        fit_res = fit_mods.ExpDecayModel.fit(data=self.normalized_data_points,
-                                             t=self.sweep_points[:-
-                                                                 self.NoCalPoints],
-                                             params=self.params)
+        else:
+            # Guess for params
+            fit_mods.ExpDecayModel.set_param_hint('amplitude',
+                                                  value=1,
+                                                  min=0,
+                                                  max=2)
+            fit_mods.ExpDecayModel.set_param_hint('tau',
+                                                  value=self.sweep_points[1] * 50,
+                                                  min=self.sweep_points[1],
+                                                  max=self.sweep_points[-1] * 1000)
+            fit_mods.ExpDecayModel.set_param_hint('offset',
+                                                  value=0,
+                                                  vary=False)
+            fit_mods.ExpDecayModel.set_param_hint('n',
+                                                  value=1,
+                                                  vary=False)
+            self.params = fit_mods.ExpDecayModel.make_params()
+
+            fit_res = fit_mods.ExpDecayModel.fit(data=self.normalized_data_points,
+                                                 t=self.sweep_points[:-
+                                                                     self.NoCalPoints],
+                                                 params=self.params)

         if kw.get('print_fit_results', False):
             print(fit_res.fit_report())
@@ -4707,61 +4743,123 @@ def run_default_analysis(self, show=False, close_file=False, **kw):
         self.fit_res = self.fit_T1(**kw)
         self.save_fitted_parameters(fit_res=self.fit_res,
                                     var_name='F|1>')
-        # Create self.T1 and self.T1_stderr and save them
-        self.get_measured_T1()  # in seconds
-        self.save_computed_parameters(
-            self.T1_dict, var_name=self.value_names[0])
+        if self.fit_double_exp:
+            self.get_measured_double_T1()  # in seconds
+            self.save_computed_parameters(
+                self.T1_dict, var_name=self.value_names[0])

-        T1_micro_sec = self.T1_dict['T1'] * 1e6
-        T1_err_micro_sec = self.T1_dict['T1_stderr'] * 1e6
-        # Print T1 and error on screen
-        if kw.get('print_parameters', False):
-            print('T1 = {:.5f} ('.format(T1_micro_sec) + 'μs) \t '
-                  'T1 StdErr = {:.5f} ('.format(
-                      T1_err_micro_sec) + 'μs)')
+            T1_1_micro_sec = self.fit_res.uvars['tau1'].n * 1e6
+            T1_1_err_micro_sec = self.fit_res.uvars['tau1'].s * 1e6
+            T1_2_micro_sec = self.fit_res.uvars['tau2'].n * 1e6
+            T1_2_err_micro_sec = self.fit_res.uvars['tau2'].s * 1e6
+            # Print T1 and error on screen
+            if kw.get('print_parameters', False):
+                print('T1_1 = {:.5f} ('.format(T1_1_micro_sec) + 'μs) \t '
+                      'T1_1 StdErr = {:.5f} ('.format(
+                          T1_1_err_micro_sec) + 'μs)')
+                print('T1_2 = {:.5f} ('.format(T1_2_micro_sec) + 'μs) \t '
+                      'T1_2 StdErr = {:.5f} ('.format(
+                          T1_2_err_micro_sec) + 'μs)')
+
+            # Plot best fit and initial fit + data
+            if self.make_fig:
+
+                units = SI_prefix_and_scale_factor(val=max(abs(self.ax.get_xticks())),
+                                                   unit=self.sweep_unit[0])[1]
+                # We will not bother retrieving old T1 values from dataset
+                old_vals = ''

-        # Plot best fit and initial fit + data
-        if self.make_fig:
+                best_vals = self.fit_res.best_values
+
+                textstr = ('$T1_1$ = {:.3f} '.format(T1_1_micro_sec) +
+                           units +
+                           ' $\pm$ {:.3f} '.format(T1_1_err_micro_sec) +
+                           units +
+                           '\n$T1_2$ = {:.3f} '.format(T1_2_micro_sec) +
+                           units +
+                           ' $\pm$ {:.3f} '.format(T1_2_err_micro_sec) +
+                           units + old_vals)
+
+                self.fig.text(0.5, 0.9, textstr, transform=self.ax.transAxes,
+                              fontsize=self.font_size,
+                              verticalalignment='top',
+                              horizontalalignment='center',
+                              bbox=self.box_props)

-            units = SI_prefix_and_scale_factor(val=max(abs(self.ax.get_xticks())),
-                                               unit=self.sweep_unit[0])[1]
-            # We will not bother retrieving old T1 values from dataset
-            old_vals = ''
+                if show_guess:
+                    self.ax.plot(self.sweep_points[:-self.NoCalPoints],
+                                 self.fit_res.init_fit, 'k--', linewidth=self.line_width)
+
+                best_vals = self.fit_res.best_values
+                t = np.linspace(self.sweep_points[0],
+                                self.sweep_points[-self.NoCalPoints], 1000)
+
+                y = fit_mods.DoubleExpDecayFunc(
+                    t,
+                    tau1=best_vals['tau1'],
+                    tau2=best_vals['tau2'],
+                    n=best_vals['n'],
+                    amp1=best_vals['amp1'],
+                    amp2=best_vals['amp2'],
+                    offset=best_vals['offset'])
+
+                self.ax.plot(t, y, 'r-', linewidth=self.line_width)
+
+                self.ax.locator_params(axis='x', nbins=6)
+
+                if show:
+                    plt.show()
+
+                self.save_fig(
+                    self.fig, figname=self.measurementstring + '_Fit', **kw)
+
+        else:
+
+            # Create self.T1 and self.T1_stderr and save them
+            self.get_measured_T1()  # in seconds
+            self.save_computed_parameters(
+                self.T1_dict, var_name=self.value_names[0])
+
+            T1_micro_sec = self.T1_dict['T1'] * 1e6
+            T1_err_micro_sec = self.T1_dict['T1_stderr'] * 1e6
+            # Print T1 and error on screen
+            if kw.get('print_parameters', False):
+                print('T1 = {:.5f} ('.format(T1_micro_sec) + 'μs) \t '
+                      'T1 StdErr = {:.5f} ('.format(
+                          T1_err_micro_sec) + 'μs)')

-        textstr = ('$T_1$ = {:.3f} '.format(T1_micro_sec) +
-                   units +
-                   ' $\pm$ {:.3f} '.format(T1_err_micro_sec) +
-                   units + old_vals)
+            # Plot best fit and initial fit + data
+            if self.make_fig:

-        self.fig.text(0.5, 0, textstr, transform=self.ax.transAxes,
-                      fontsize=self.font_size,
-                      verticalalignment='top',
-                      horizontalalignment='center',
-                      bbox=self.box_props)
+                units = SI_prefix_and_scale_factor(val=max(abs(self.ax.get_xticks())),
+                                                   unit=self.sweep_unit[0])[1]
+                # We will not bother retrieving old T1 values from dataset
+                old_vals = ''

-        if show_guess:
-            self.ax.plot(self.sweep_points[:-self.NoCalPoints],
-                         self.fit_res.init_fit, 'k--', linewidth=self.line_width)
+                textstr = ('$T_1$ = {:.3f} '.format(T1_micro_sec) +
+                           units +
+                           ' $\pm$ {:.3f} '.format(T1_err_micro_sec) +
+                           units + old_vals)

-        best_vals = self.fit_res.best_values
-        t = np.linspace(self.sweep_points[0],
-                        self.sweep_points[-self.NoCalPoints], 1000)
+                self.fig.text(0.5, 0, textstr, transform=self.ax.transAxes,
+                              fontsize=self.font_size,
+                              verticalalignment='top',
+                              horizontalalignment='center',
+                              bbox=self.box_props)

-        y = fit_mods.ExpDecayFunc(
-            t, tau=best_vals['tau'],
-            n=best_vals['n'],
-            amplitude=best_vals['amplitude'],
-            offset=best_vals['offset'])
+                if show_guess:
+                    self.ax.plot(self.sweep_points[:-self.NoCalPoints],
+                                 self.fit_res.init_fit, 'k--', linewidth=self.line_width)

-        self.ax.plot(t, y, 'r-', linewidth=self.line_width)
+                best_vals = self.fit_res.best_values
+                t = np.linspace(self.sweep_points[0],
+                                self.sweep_points[-self.NoCalPoints], 1000)

-        self.ax.locator_params(axis='x', nbins=6)
+                y = fit_mods.ExpDecayFunc(
+                    t, tau=best_vals['tau'],
+                    n=best_vals['n'],
+                    amplitude=best_vals['amplitude'],
+                    offset=best_vals['offset'])

-        if show:
-            plt.show()
+                self.ax.plot(t, y, 'r-', linewidth=self.line_width)

-        self.save_fig(
-            self.fig, figname=self.measurementstring + '_Fit', **kw)
+                self.ax.locator_params(axis='x', nbins=6)
+
+                if show:
+                    plt.show()
+
+                self.save_fig(
+                    self.fig, figname=self.measurementstring + '_Fit', **kw)

         if close_file:
             self.data_file.close()
@@ -4782,6 +4880,24 @@ def get_measured_T1(self):

         return self.T1, T1_stderr

+    def get_measured_double_T1(self):
+        fitted_pars = self.data_file['Analysis']['Fitted Params F|1>']
+
+        self.T1_1 = fitted_pars['tau1'].attrs['value']
+        T1_1_stderr = fitted_pars['tau1'].attrs['stderr']
+
+        self.T1_2 = fitted_pars['tau2'].attrs['value']
+        T1_2_stderr = fitted_pars['tau2'].attrs['stderr']
+        # T1 = self.fit_res.params['tau'].value
+        # T1_stderr = self.fit_res.params['tau'].stderr
+
+        # return as dict for use with "save_computed_parameters"; units are
+        # seconds
+        self.T1_dict = {'T1_1': self.T1_1, 'T1_1_stderr': T1_1_stderr,
+                        'T1_2': self.T1_2, 'T1_2_stderr': T1_2_stderr}
+
+        return self.T1_1, self.T1_2
+

 class Ramsey_Analysis(TD_Analysis):
     """
diff --git a/pycqed/analysis_v2/multi_analysis.py b/pycqed/analysis_v2/multi_analysis.py
index de2133393..00fa7b67d 100644
--- a/pycqed/analysis_v2/multi_analysis.py
+++ b/pycqed/analysis_v2/multi_analysis.py
@@ -470,7 +470,8 @@ def process_data(self):
             self.proc_data_dict['{}_nor_data'.format(q)] = nor_data

             ### fit to normalized data ###
-            times = self.proc_data_dict['{}_times'.format(q)]
+            # times = self.proc_data_dict['{}_times'.format(q)]
+            times = self.times

             fit_mods.ExpDecayModel.set_param_hint('amplitude',
                                                   value=1,
@@ -502,18 +503,19 @@ def prepare_plots(self):
                 'plotfn': plot_Multi_T1,
                 'data': self.proc_data_dict,
                 'qubit': q,
+                'times': self.times,
                 'title': 'T1_'+q+'_' +self.raw_data_dict['timestamps'][0],
                 'plotsize': (10,10)
                 }


-def plot_Multi_T1(qubit, data,title, ax=None, **kwargs):
+def plot_Multi_T1(qubit, times, data, title, ax=None, **kwargs):
     if ax is None:
         fig, ax = plt.subplots(figsize=(15,15))

     q = qubit
-    times = data['{}_times'.format(q)]*1e6
+    # times = data['{}_times'.format(q)]*1e6
     nor_data = data['{}_nor_data'.format(q)]
     fit_data = data['{}_fitted_data'.format(q)]
     T1 = data['quantities_of_interest'][q]['tau']*1e6
diff --git a/pycqed/instrument_drivers/meta_instrument/HAL_Device.py b/pycqed/instrument_drivers/meta_instrument/HAL_Device.py
index 1249ff4b5..166fc0239 100644
--- a/pycqed/instrument_drivers/meta_instrument/HAL_Device.py
+++ b/pycqed/instrument_drivers/meta_instrument/HAL_Device.py
@@ -5644,6 +5644,82 @@ def measure_ramsey_tomo(
         a = ma2.tqg.Two_qubit_gate_tomo_Analysis(label='Ramsey',
                                                  n_pairs=len(qubit_ramsey))
         return a.qoi
+
+    def measure_T1_TLS(
+            self,
+            q0: str,
+            q0_amp: float,
+            q0_pulse_length: float,  # in [s]
+            q_parks: list,
+            times=None,
+            close_fig=True,
+            analyze=True,
+            MC: Optional[MeasurementControl] = None,
+            disable_metadata: bool = False,
+            auto=True,
+            fit_double_exp=False
+    ):
+        """
+        Measure a T1-style decay on qubit q0 while square flux pulses are
+        applied to q0 and to the qubits in q_parks, e.g. to probe population
+        loss into a TLS at the pulsed flux amplitude. Set fit_double_exp to
+        extract two decay times from a double-exponential fit.
+        """
+
+        if MC is None:
+            MC = self.instr_MC.get_instr()
+
+        for qubit in q_parks:
+            QUBIT = self.find_instrument(qubit)
+            flux_lm_QUBIT = self.find_instrument(QUBIT.instr_LutMan_Flux())
+            flux_lm_QUBIT.sq_length(q0_pulse_length)
+            flux_lm_QUBIT.park_length(q0_pulse_length)
+            flux_lm_QUBIT.sq_amp(0.25)
+            flux_lm_QUBIT.park_amp(0.25)
+            flux_lm_QUBIT.cfg_awg_channel_amplitude(0.3)
+        self.prepare_for_timedomain(qubits=q_parks, bypass_flux=False)
+
+        Q0 = self.find_instrument(q0)
+        flux_lm_Q0 = self.find_instrument(Q0.instr_LutMan_Flux())
+        flux_lm_Q0.cfg_awg_channel_amplitude(q0_amp)
+        flux_lm_Q0.sq_amp(0.25)
+        flux_lm_Q0.sq_length(q0_pulse_length)
+        self.prepare_for_timedomain(qubits=[q0], bypass_flux=False)
+
+        if times is None:
+            times = np.linspace(0, Q0.T1() * 4, 31)
+
+        dt = times[1] - times[0]
+
+        times = np.concatenate([times, (times[-1] + 1 * dt,
+                                        times[-1] + 2 * dt,
+                                        times[-1] + 3 * dt,
+                                        times[-1] + 4 * dt)])
+
+        q0_idx = Q0.cfg_qubit_nr()
+        q_parks_idx = []
+        for q in q_parks:
+            q_parks_idx.append(self.find_instrument(q).cfg_qubit_nr())
+
+        p = mqo.T1_TLS(
+            q0_idx=q0_idx,
+            q_parks_idx=q_parks_idx,
+            platf_cfg=self.cfg_openql_platform_fn(),
+            times=times
+        )
+
+        s = swf.OpenQL_Sweep(
+            openql_program=p,
+            parameter_name='Time',
+            unit='s',
+            CCL=self.instr_CC.get_instr()
+        )
+        MC.set_sweep_function(s)
+        MC.set_sweep_points(times)
+        d = self.get_int_avg_det()
+        MC.set_detector_function(d)
+        MC.run('T1' + self.msmt_suffix,
+               disable_snapshot_metadata=disable_metadata)
+
+        if analyze:
+            a = ma.T1_Analysis(auto=auto, fit_double_exp=fit_double_exp,
+                               close_fig=close_fig)
+            if fit_double_exp:
+                return a.T1_1, a.T1_2
+            return a.T1

     ##########################################################################
     # public functions: calibrate
diff --git a/pycqed/instrument_drivers/meta_instrument/inspire_dependency_graph.py b/pycqed/instrument_drivers/meta_instrument/inspire_dependency_graph.py
index cd8611bd7..4e4991584 100644
--- a/pycqed/instrument_drivers/meta_instrument/inspire_dependency_graph.py
+++ b/pycqed/instrument_drivers/meta_instrument/inspire_dependency_graph.py
@@ -1128,28 +1128,25 @@ def create_dep_graph(self,
                               CZname + ' SQP Pulsed')

             for qubit_group in qubit_groups:
-                if pair[0] in qubit_group:
-
+                if pair[0] in qubit_group['qubit_list']:
                     self.add_edge(CZname + ' SQP Pulsed',
                                   f"Flipping_{qubit_group['name']}")

             if(doSQPs):
                 self.add_edge('Prep Inspire',
                               CZname + ' SQP Static')
-
-                for qubit_group in qubit_groups:
-                    if pair[1] in qubit_group:
-                        self.add_edge(CZname + ' SQP Static',
-                                      f"Flipping_{qubit_group['name']}")
+                for qubit_group in qubit_groups:
+                    if pair[1] in qubit_group['qubit_list']:
+                        self.add_edge(CZname + ' SQP Static',
+                                      f"Flipping_{qubit_group['name']}")

             if(parkingqubitexists):
                 ##### need to add specific routing for parked qubit.
                 ##### the one below does not do the job!!!!!
                 self.add_edge('Prep Inspire',
                               CZname + ' SQP Parked')
-
                 for qubit_group in qubit_groups:
-                    if pair[2] in qubit_group:
+                    if pair[2] in qubit_group['qubit_list']:
                         self.add_edge(CZname + ' SQP Parked',
                                       f"Flipping_{qubit_group['name']}")
diff --git a/pycqed/measurement/openql_experiments/multi_qubit_oql.py b/pycqed/measurement/openql_experiments/multi_qubit_oql.py
index 36052346b..7f8198c9c 100644
--- a/pycqed/measurement/openql_experiments/multi_qubit_oql.py
+++ b/pycqed/measurement/openql_experiments/multi_qubit_oql.py
@@ -3724,6 +3724,65 @@ def multi_qubit_motzoi(qubits_idx: list, platf_cfg: str = None) -> OqlProgram:
     p.compile()
     return p

+
+def T1_TLS(q0_idx: int,
+           q_parks_idx: list,
+           platf_cfg: str,
+           times: List[float],
+           ) -> OqlProgram:
+    """
+    Single-qubit T1-style sequence in which a square flux pulse is applied to
+    the measured qubit and to the parked qubits before and after each wait
+    time, e.g. to probe decay involving a TLS at the pulsed flux amplitude.
+    Writes output files to the directory specified in openql.
+    Output directory is set as an attribute to the program for convenience.
+
+    Input pars:
+        times:          the list of waiting times for each T1 element
+        q0_idx:         int specifying the target qubit (starting at 0)
+        q_parks_idx:    list of qubit indices that receive the same square
+                        flux pulse as q0
+        platf_cfg:      filename of the platform config file
+    Returns:
+        p:              OpenQL Program object
+    """
+    p = OqlProgram('T1_TLS', platf_cfg)
+
+    times = np.concatenate([np.array([0.0]), times])
+
+    for i, time in enumerate(times[:-5]):
+        k = p.create_kernel('T1_TLS_{}'.format(i))
+        k.prepz(q0_idx)
+        for q_park in q_parks_idx:
+            k.prepz(q_park)
+        k.barrier([])  # alignment workaround
+
+        k.gate('rx180', [q0_idx])
+        k.barrier([])  # alignment workaround
+
+        if i == 0:
+            k.measure(q0_idx)
+            p.add_kernel(k)
+        else:
+            k.gate('sf_square', [q0_idx])
+            for q_park in q_parks_idx:
+                k.gate('sf_square', [q_park])  # square pulse
+            k.barrier([])  # alignment workaround
+
+            wait_nanoseconds = int(round(time/1e-9))
+            k.gate("wait", [q0_idx], wait_nanoseconds)
+            k.barrier([])  # alignment workaround
+
+            k.gate('sf_square', [q0_idx])
+            for q_park in q_parks_idx:
+                k.gate('sf_square', [q_park])  # square pulse
+            k.barrier([])  # alignment workaround
+
+            k.measure(q0_idx)
+            p.add_kernel(k)
+
+    # adding the calibration points
+    p.add_single_qubit_cal_points(qubit_idx=q0_idx)
+
+    p.compile()
+    return p

 # def Ramsey_tomo(qR: int,
 #                 qC: int,
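
Reviewer note: this patch assumes that `DoubleExpDecayFunc` is already defined in
pycqed/analysis/fitting_models.py; the diff only wraps it in an lmfit.Model and calls it
with the keyword arguments amp1, amp2, tau1, tau2, n and offset. Its body is not part of
this diff, so the snippet below is only a minimal sketch of the functional form those
arguments suggest, not the repository's actual definition:

import numpy as np

def DoubleExpDecayFunc(t, tau1, tau2, amp1, amp2, offset, n=1):
    # Assumed form, inferred from the parameter names used by the new
    # fit_double_exp path: two exponential decays sharing the exponent n.
    return (amp1 * np.exp(-(t / tau1) ** n) +
            amp2 * np.exp(-(t / tau2) ** n) +
            offset)

With the patch applied, the new path would be exercised roughly as
ma.T1_Analysis(auto=True, fit_double_exp=True) after a T1-style measurement, or through
device.measure_T1_TLS(..., fit_double_exp=True), where `device` stands for a HAL_Device
instance and the elided arguments are placeholders.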