diff --git a/qiskit_experiments/library/calibration/fine_drag_cal.py b/qiskit_experiments/library/calibration/fine_drag_cal.py index c79bad7562..74ff341657 100644 --- a/qiskit_experiments/library/calibration/fine_drag_cal.py +++ b/qiskit_experiments/library/calibration/fine_drag_cal.py @@ -66,8 +66,6 @@ def __init__( auto_update=auto_update, ) - self.set_transpile_options(basis_gates=["sx", schedule_name, "rz"]) - @classmethod def _default_experiment_options(cls) -> Options: """Default experiment options. diff --git a/qiskit_experiments/test/fake_service.py b/qiskit_experiments/test/fake_service.py index 5029193921..deb79b6616 100644 --- a/qiskit_experiments/test/fake_service.py +++ b/qiskit_experiments/test/fake_service.py @@ -111,33 +111,38 @@ def create_experiment( # backend - the query methods `experiment` and `experiments` are supposed to return an # an instantiated backend object, and not only the backend name. We assume that the fake # service works with the fake backend (class FakeBackend). - self.exps = pd.concat( + row = pd.DataFrame( [ - self.exps, - pd.DataFrame( - [ - { - "experiment_type": experiment_type, - "experiment_id": experiment_id, - "parent_id": parent_id, - "backend_name": backend_name, - "metadata": metadata, - "job_ids": job_ids, - "tags": tags, - "notes": notes, - "share_level": kwargs.get("share_level", None), - "device_components": [], - "start_datetime": datetime(2022, 1, 1) - + timedelta(hours=len(self.exps)), - "figure_names": [], - "backend": FakeBackend(backend_name=backend_name), - } - ], - columns=self.exps.columns, - ), + { + "experiment_type": experiment_type, + "experiment_id": experiment_id, + "parent_id": parent_id, + "backend_name": backend_name, + "metadata": metadata, + "job_ids": job_ids, + "tags": tags, + "notes": notes, + "share_level": kwargs.get("share_level", None), + "device_components": [], + "start_datetime": datetime(2022, 1, 1) + timedelta(hours=len(self.exps)), + "figure_names": [], + "backend": FakeBackend(backend_name=backend_name), + } ], - ignore_index=True, + columns=self.exps.columns, ) + if len(self.exps) > 0: + self.exps = pd.concat( + [ + self.exps, + row, + ], + ignore_index=True, + ) + else: + # Avoid the FutureWarning on concatenating empty DataFrames + # introduced in https://github.com/pandas-dev/pandas/pull/52532 + self.exps = row return experiment_id @@ -293,35 +298,39 @@ def create_analysis_result( # `IBMExperimentService.create_analysis_result`. Since `DbExperimentData` does not set it # via kwargs (as it does with chisq), the user cannot control the time and the service # alone decides about it. Here we've chosen to set the start date of the experiment. 
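The append pattern used in the fake service above, shown in isolation as a minimal sketch (table and column names here are illustrative, not taken from the service): recent pandas versions emit a FutureWarning when an empty or all-NA frame is passed to pd.concat, so the first row is assigned directly instead of concatenated.

    import warnings
    import pandas as pd

    def append_row(table: pd.DataFrame, record: dict) -> pd.DataFrame:
        """Append ``record`` to ``table`` without concatenating an empty frame."""
        row = pd.DataFrame([record], columns=table.columns)
        if len(table) > 0:
            return pd.concat([table, row], ignore_index=True)
        # When the table is still empty, returning the one-row frame directly
        # avoids the deprecated empty-frame concatenation.
        return row

    table = pd.DataFrame(columns=["experiment_id", "backend_name"])
    with warnings.catch_warnings():
        warnings.simplefilter("error", FutureWarning)  # surface the warning if it fires
        table = append_row(table, {"experiment_id": "id-0", "backend_name": "fake"})
        table = append_row(table, {"experiment_id": "id-1", "backend_name": "fake"})
    assert len(table) == 2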
- self.results = pd.concat( + row = pd.DataFrame( [ - self.results, - pd.DataFrame( - [ - { - "result_data": result_data, - "result_id": result_id, - "result_type": result_type, - "device_components": device_components, - "experiment_id": experiment_id, - "quality": quality, - "verified": verified, - "tags": tags, - "backend_name": self.exps.loc[self.exps.experiment_id == experiment_id] - .iloc[0] - .backend_name, - "chisq": kwargs.get("chisq", None), - "creation_datetime": self.exps.loc[ - self.exps.experiment_id == experiment_id - ] - .iloc[0] - .start_datetime, - } - ] - ), - ], - ignore_index=True, + { + "result_data": result_data, + "result_id": result_id, + "result_type": result_type, + "device_components": device_components, + "experiment_id": experiment_id, + "quality": quality, + "verified": verified, + "tags": tags, + "backend_name": self.exps.loc[self.exps.experiment_id == experiment_id] + .iloc[0] + .backend_name, + "chisq": kwargs.get("chisq", None), + "creation_datetime": self.exps.loc[self.exps.experiment_id == experiment_id] + .iloc[0] + .start_datetime, + } + ] ) + if len(self.results) > 0: + self.results = pd.concat( + [ + self.results, + row, + ], + ignore_index=True, + ) + else: + # Avoid the FutureWarning on concatenating empty DataFrames + # introduced in https://github.com/pandas-dev/pandas/pull/52532 + self.results = row # a helper method for updating the experiment's device components, see usage below def add_new_components(expcomps): diff --git a/qiskit_experiments/test/pulse_backend.py b/qiskit_experiments/test/pulse_backend.py index 0f0627afbd..70267440fc 100644 --- a/qiskit_experiments/test/pulse_backend.py +++ b/qiskit_experiments/test/pulse_backend.py @@ -96,7 +96,7 @@ def __init__( None, name="PulseBackendV2", description="A PulseBackend simulator", - online_date=datetime.datetime.utcnow(), + online_date=datetime.datetime.now(datetime.timezone.utc), backend_version="0.0.1", ) diff --git a/qiskit_experiments/visualization/drawers/mpl_drawer.py b/qiskit_experiments/visualization/drawers/mpl_drawer.py index 8ddf696919..d2e6956c15 100644 --- a/qiskit_experiments/visualization/drawers/mpl_drawer.py +++ b/qiskit_experiments/visualization/drawers/mpl_drawer.py @@ -147,6 +147,31 @@ def format_canvas(self): else: all_axes = [self._axis] + # Set axes scale. This needs to be done before anything tries to work with + # the axis limits because if no limits or data are set explicitly the + # default limits depend on the scale method (for example, the minimum + # value is 0 for linear scaling but not for log scaling). + def signed_sqrt(x): + return np.sign(x) * np.sqrt(abs(x)) + + def signed_square(x): + return np.sign(x) * x**2 + + for ax_type in ("x", "y"): + for sub_ax in all_axes: + scale = self.figure_options.get(f"{ax_type}scale") + if ax_type == "x": + mpl_setscale = sub_ax.set_xscale + else: + mpl_setscale = sub_ax.set_yscale + + # Apply non linear axis spacing + if scale is not None: + if scale == "quadratic": + mpl_setscale("function", functions=(signed_square, signed_sqrt)) + else: + mpl_setscale(scale) + # Get axis formatter from drawing options formatter_opts = {} for ax_type in ("x", "y"): @@ -181,12 +206,6 @@ def format_canvas(self): "max_ax_vals": max_vals, } - def signed_sqrt(x): - return np.sign(x) * np.sqrt(abs(x)) - - def signed_square(x): - return np.sign(x) * x**2 - for i, sub_ax in enumerate(all_axes): # Add data labels if there are multiple labels registered per sub_ax. 
_, labels = sub_ax.get_legend_handles_labels() @@ -197,18 +216,15 @@ def signed_square(x): limit = formatter_opts[ax_type]["limit"][i] unit = formatter_opts[ax_type]["unit"][i] unit_scale = formatter_opts[ax_type]["unit_scale"][i] - scale = self.figure_options.get(f"{ax_type}scale") min_ax_vals = formatter_opts[ax_type]["min_ax_vals"] max_ax_vals = formatter_opts[ax_type]["max_ax_vals"] share_axis = self.figure_options.get(f"share{ax_type}") if ax_type == "x": - mpl_setscale = sub_ax.set_xscale mpl_axis_obj = getattr(sub_ax, "xaxis") mpl_setlimit = sub_ax.set_xlim mpl_share = sub_ax.sharex else: - mpl_setscale = sub_ax.set_yscale mpl_axis_obj = getattr(sub_ax, "yaxis") mpl_setlimit = sub_ax.set_ylim mpl_share = sub_ax.sharey @@ -219,13 +235,6 @@ def signed_square(x): else: limit = min_ax_vals[i], max_ax_vals[i] - # Apply non linear axis spacing - if scale is not None: - if scale == "quadratic": - mpl_setscale("function", functions=(signed_square, signed_sqrt)) - else: - mpl_setscale(scale) - # Create formatter for axis tick label notation if unit and unit_scale: # If value is specified, automatically scale axis magnitude diff --git a/test/base.py b/test/base.py index 028b549b7b..fc73f5665e 100644 --- a/test/base.py +++ b/test/base.py @@ -99,6 +99,26 @@ def setUpClass(cls): super().setUpClass() warnings.filterwarnings("error", category=DeprecationWarning) + # Tests should not generate any warnings unless testing those + # warnings. In that case, the test should catch the warning + # with assertWarns or warnings.catch_warnings. + warnings.filterwarnings("error", module="qiskit_experiments") + # Ideally, changes introducing pending deprecations should include + # alternative code paths and not need to generate warnings in the + # tests, but this exception is necessary until the use of the + # deprecated ScatterTable methods is removed. + warnings.filterwarnings( + "default", + module="qiskit_experiments", + message=".*Curve data uses dataframe representation.*", + category=PendingDeprecationWarning, + ) + warnings.filterwarnings( + "default", + module="qiskit_experiments", + message=".*The curve data representation is replaced with dataframe format.*", + category=PendingDeprecationWarning, + ) # Some functionality may be deprecated in Qiskit Experiments. 
If # the deprecation warnings aren't filtered, the tests will fail as diff --git a/test/calibration/test_calibrations.py b/test/calibration/test_calibrations.py index 9e301839aa..9cbdab4c46 100644 --- a/test/calibration/test_calibrations.py +++ b/test/calibration/test_calibrations.py @@ -14,6 +14,7 @@ from test.base import QiskitExperimentsTestCase import os +import unittest import uuid from collections import defaultdict from datetime import datetime, timezone, timedelta @@ -534,14 +535,14 @@ def test_default_schedules(self): xp3 = self.cals.get_schedule("xp", (3,)) # Check that xp0 is Play(Gaussian(160, 0.15, 40), 0) - self.assertTrue(isinstance(xp0.instructions[0][1].pulse, Gaussian)) + self.assertTrue(xp0.instructions[0][1].pulse.pulse_type == "Gaussian") self.assertEqual(xp0.instructions[0][1].channel, DriveChannel(0)) self.assertEqual(xp0.instructions[0][1].pulse.amp, 0.15) self.assertEqual(xp0.instructions[0][1].pulse.sigma, 40) self.assertEqual(xp0.instructions[0][1].pulse.duration, 160) # Check that xp3 is Play(Drag(160, 0.25, 40, 10), 3) - self.assertTrue(isinstance(xp3.instructions[0][1].pulse, Drag)) + self.assertTrue(xp3.instructions[0][1].pulse.pulse_type == "Drag") self.assertEqual(xp3.instructions[0][1].channel, DriveChannel(3)) self.assertEqual(xp3.instructions[0][1].pulse.amp, 0.25) self.assertEqual(xp3.instructions[0][1].pulse.sigma, 40) @@ -590,8 +591,8 @@ def test_replace_schedule(self): # For completeness we check that schedule that comes out. sched_cal = self.cals.get_schedule("xp", (3,)) - self.assertTrue(isinstance(sched_cal.instructions[0][1].pulse, Drag)) - self.assertTrue(isinstance(sched_cal.instructions[1][1].pulse, Drag)) + self.assertTrue(sched_cal.instructions[0][1].pulse.pulse_type == "Drag") + self.assertTrue(sched_cal.instructions[1][1].pulse.pulse_type == "Drag") self.assertEqual(sched_cal.instructions[0][1].pulse.amp, 0.125) self.assertEqual(sched_cal.instructions[1][1].pulse.amp, 0.125) @@ -1617,10 +1618,13 @@ def tearDown(self): if os.path.exists(self._prefix + file): os.remove(self._prefix + file) - def test_save_load_parameter_values(self): + def test_save_load_parameter_values_csv(self): """Test that we can save and load parameter values.""" + # NOTE: This is a legacy test that can be removed when csv support is + # removed from Calibrations.save - with self.assertWarns(DeprecationWarning): + # Expect user warning about schedules, deprecation warning about csv + with self.assertWarns((UserWarning, DeprecationWarning)): self.cals.save("csv", overwrite=True, file_prefix=self._prefix) self.assertEqual(self.cals.get_parameter_value("amp", (3,), "xp"), 0.1) @@ -1645,11 +1649,11 @@ def test_save_load_parameter_values(self): self.assertTrue(isinstance(val, float)) # Check that we cannot rewrite files as they already exist. 
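unittest's assertWarns, like assertRaises, accepts a tuple of categories, and an exception raised inside an assertWarns block is passed through to an enclosing assertRaises. A standalone sketch with a made-up fake_save() helper (not the Calibrations API):

    import unittest
    import warnings

    def fake_save(overwrite=False):
        """Made-up stand-in that warns and then fails when overwrite is False."""
        warnings.warn("schedules are not serialized", UserWarning)
        warnings.warn("csv support is deprecated", DeprecationWarning)
        if not overwrite:
            raise FileExistsError("output file already exists")

    class SaveWarningsTest(unittest.TestCase):
        def test_warnings_and_error(self):
            # Either category in the tuple satisfies assertWarns.
            with self.assertWarns((UserWarning, DeprecationWarning)):
                fake_save(overwrite=True)

            # assertRaises sits outside: assertWarns lets the unexpected
            # exception propagate, and assertRaises catches it here.
            with self.assertRaises(FileExistsError):
                with self.assertWarns((UserWarning, DeprecationWarning)):
                    fake_save(overwrite=False)

    if __name__ == "__main__":
        unittest.main()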
- with self.assertWarns(DeprecationWarning): - with self.assertRaises(CalibrationError): + with self.assertRaises(CalibrationError): + with self.assertWarns((UserWarning, DeprecationWarning)): self.cals.save("csv", file_prefix=self._prefix) - with self.assertWarns(DeprecationWarning): + with self.assertWarns((UserWarning, DeprecationWarning)): self.cals.save("csv", overwrite=True, file_prefix=self._prefix) def test_alternate_date_formats(self): @@ -1659,13 +1663,10 @@ def test_alternate_date_formats(self): value = ParameterValue(0.222, date_time=new_date) self.cals.add_parameter_value(value, "amp", (3,), "xp") - with self.assertWarns(DeprecationWarning): - self.cals.save("csv", overwrite=True, file_prefix=self._prefix) - self.cals._params = defaultdict(list) - with self.assertWarns(DeprecationWarning): - self.cals.load_parameter_values(self._prefix + "parameter_values.csv") + self.cals.save("json", overwrite=True, file_prefix=self._prefix) + self.cals.load(self._prefix + ".json") - def test_save_load_library(self): + def test_save_load_library_csv(self): """Test that we can load and save a library. These libraries contain both parameters with schedules and parameters without @@ -1678,7 +1679,7 @@ def test_save_load_library(self): cals.parameters_table() - with self.assertWarns(DeprecationWarning): + with self.assertWarns((UserWarning, DeprecationWarning)): cals.save(file_type="csv", overwrite=True, file_prefix=self._prefix) with self.assertWarns(DeprecationWarning): @@ -1691,6 +1692,33 @@ def test_save_load_library(self): BackendData(backend).drive_freqs[0], ) + # Expected to fail because json calibration loading does not support + # restoring Parameter objects + @unittest.expectedFailure + def test_save_load_library(self): + """Test that we can load and save a library. + + These libraries contain both parameters with schedules and parameters without + any schedules (e.g. frequencies for qubits and readout). + """ + + library = FixedFrequencyTransmon() + backend = FakeArmonkV2() + cals = Calibrations.from_backend(backend, libraries=[library]) + + cals.parameters_table() + + cals.save(file_type="json", overwrite=True, file_prefix=self._prefix) + + loaded = Calibrations.load(self._prefix + ".json") + + # Test the value of a few loaded params. + self.assertEqual(loaded.get_parameter_value("amp", (0,), "x"), 0.5) + self.assertEqual( + loaded.get_parameter_value("drive_freq", (0,)), + BackendData(backend).drive_freqs[0], + ) + def test_json_round_trip(self): """Test round trip test for JSON file format. @@ -1698,7 +1726,7 @@ def test_json_round_trip(self): and we can still generate schedules with loaded calibration instance, even though calibrations is instantiated outside built-in library. 
""" - self.cals.save(file_type="json", overwrite="True", file_prefix=self._prefix) + self.cals.save(file_type="json", overwrite=True, file_prefix=self._prefix) loaded = self.cals.load(file_path=self._prefix + ".json") self.assertEqual(self.cals, loaded) @@ -1706,6 +1734,19 @@ def test_json_round_trip(self): roundtrip_sched = loaded.get_schedule("cr", (3, 2)) self.assertEqual(original_sched, roundtrip_sched) + def test_overwrite(self): + """Test that overwriting errors unless overwrite flag is used""" + self.cals.save(file_type="json", overwrite=True, file_prefix=self._prefix) + with self.assertRaises(CalibrationError): + self.cals.save(file_type="json", overwrite=False, file_prefix=self._prefix) + + # Add a value to make sure data is really overwritten and not carried + # over from first write + self.cals.add_parameter_value(0.45, "amp", (3,), "xp") + self.cals.save(file_type="json", overwrite=True, file_prefix=self._prefix) + loaded = Calibrations.load(file_path=self._prefix + ".json") + self.assertEqual(self.cals, loaded) + class TestInstructionScheduleMap(QiskitExperimentsTestCase): """Class to test the functionality of a Calibrations""" diff --git a/test/calibration/test_setup_library.py b/test/calibration/test_setup_library.py index 1c1307c97a..12900dbf38 100644 --- a/test/calibration/test_setup_library.py +++ b/test/calibration/test_setup_library.py @@ -25,7 +25,7 @@ from qiskit_experiments.framework.json import ExperimentEncoder, ExperimentDecoder -class TestLibrary(FixedFrequencyTransmon): +class MutableTestLibrary(FixedFrequencyTransmon): """A subclass designed for test_hash_warn. This class ensures that FixedFrequencyTransmon is preserved if anything goes wrong @@ -197,26 +197,27 @@ def test_hash_warn(self): 4. A warning is raised since the class definition has changed. """ - lib1 = TestLibrary() + lib1 = MutableTestLibrary() lib_data = json.dumps(lib1, cls=ExperimentEncoder) lib2 = json.loads(lib_data, cls=ExperimentDecoder) self.assertTrue(self._test_library_equivalence(lib1, lib2)) # stash method build schedules to avoid other tests from failing - build_schedules = TestLibrary._build_schedules + build_schedules = MutableTestLibrary._build_schedules def _my_build_schedules(): """A dummy function to change the class behaviour.""" pass # Change the schedule behaviour - TestLibrary._build_schedules = _my_build_schedules + MutableTestLibrary._build_schedules = _my_build_schedules with self.assertWarns(UserWarning): - json.loads(lib_data, cls=ExperimentDecoder) - - TestLibrary._build_schedules = build_schedules + try: + json.loads(lib_data, cls=ExperimentDecoder) + finally: + MutableTestLibrary._build_schedules = build_schedules def _test_library_equivalence(self, lib1, lib2) -> bool: """Test if libraries are equivalent. diff --git a/test/framework/test_composite.py b/test/framework/test_composite.py index a25dcea966..791b7b9689 100644 --- a/test/framework/test_composite.py +++ b/test/framework/test_composite.py @@ -20,10 +20,10 @@ from unittest import mock from ddt import ddt, data -from qiskit import QuantumCircuit, Aer +from qiskit import QuantumCircuit from qiskit.result import Result -from qiskit_aer import noise +from qiskit_aer import AerSimulator, noise from qiskit_ibm_experiment import IBMExperimentService @@ -915,7 +915,7 @@ def test_batch_transpile_options_integrated(self): (`test_batch_transpiled_circuits` takes care of it) but that it's correctly called within the entire flow of `BaseExperiment.run`. 
""" - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() noise_model = noise.NoiseModel() noise_model.add_all_qubit_quantum_error(noise.depolarizing_error(0.5, 2), ["cx", "swap"]) diff --git a/test/framework/test_framework.py b/test/framework/test_framework.py index 1f81579c51..105a5ffdf5 100644 --- a/test/framework/test_framework.py +++ b/test/framework/test_framework.py @@ -132,12 +132,14 @@ def test_run_analysis_experiment_data_pickle_roundtrip(self): """Test running analysis on ExperimentData after pickle roundtrip""" analysis = FakeAnalysis() expdata1 = ExperimentData() + expdata1.add_data(self.fake_job_data()) # Set physical qubit for more complete comparison expdata1.metadata["physical_qubits"] = (1,) expdata1 = analysis.run(expdata1, seed=54321) self.assertExperimentDone(expdata1) expdata2 = ExperimentData(experiment_id=expdata1.experiment_id) + expdata2.add_data(self.fake_job_data()) expdata2.metadata["physical_qubits"] = (1,) expdata2 = pickle.loads(pickle.dumps(expdata2)) expdata2 = analysis.run(expdata2, replace_results=True, seed=54321) diff --git a/test/library/quantum_volume/test_qv.py b/test/library/quantum_volume/test_qv.py index eca6c4ce03..c3b81c4591 100644 --- a/test/library/quantum_volume/test_qv.py +++ b/test/library/quantum_volume/test_qv.py @@ -13,13 +13,15 @@ """ A Tester for the Quantum Volume experiment """ +import warnings from test.base import QiskitExperimentsTestCase import json import os from uncertainties import UFloat + from qiskit.quantum_info.operators.predicates import matrix_equal +from qiskit_aer import AerSimulator -from qiskit import Aer from qiskit_experiments.framework import ExperimentData from qiskit_experiments.library import QuantumVolume from qiskit_experiments.framework import ExperimentDecoder @@ -102,19 +104,21 @@ def test_qv_sigma_decreasing(self): Test that the sigma is decreasing after adding more trials """ num_of_qubits = 3 - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) # set number of trials to a low number to make the test faster - qv_exp.set_experiment_options(trials=2) - expdata1 = qv_exp.run(backend) - self.assertExperimentDone(expdata1) - result_data1 = expdata1.analysis_results(0) - expdata2 = qv_exp.run(backend, analysis=None) - self.assertExperimentDone(expdata2) - expdata2.add_data(expdata1.data()) - qv_exp.analysis.run(expdata2) - result_data2 = expdata2.analysis_results(0) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="Must use at least 100 trials") + qv_exp.set_experiment_options(trials=2) + expdata1 = qv_exp.run(backend) + self.assertExperimentDone(expdata1) + result_data1 = expdata1.analysis_results(0) + expdata2 = qv_exp.run(backend, analysis=None) + self.assertExperimentDone(expdata2) + expdata2.add_data(expdata1.data()) + qv_exp.analysis.run(expdata2) + result_data2 = expdata2.analysis_results(0) self.assertTrue(result_data1.extra["trials"] == 2, "number of trials is incorrect") self.assertTrue( @@ -139,14 +143,15 @@ def test_qv_failure_insufficient_trials(self): insufficient_trials_data = json.load(json_file, cls=ExperimentDecoder) num_of_qubits = 3 - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) exp_data = ExperimentData(experiment=qv_exp, backend=backend) exp_data.add_data(insufficient_trials_data) - qv_exp.analysis.run(exp_data) - qv_result = exp_data.analysis_results(1) + with 
self.assertWarns(UserWarning): + qv_exp.analysis.run(exp_data) + qv_result = exp_data.analysis_results(1) self.assertTrue( qv_result.extra["success"] is False and qv_result.value == 1, "quantum volume is successful with less than 100 trials", @@ -165,7 +170,7 @@ def test_qv_failure_insufficient_hop(self): insufficient_hop_data = json.load(json_file, cls=ExperimentDecoder) num_of_qubits = 4 - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) exp_data = ExperimentData(experiment=qv_exp, backend=backend) @@ -192,7 +197,7 @@ def test_qv_failure_insufficient_confidence(self): insufficient_confidence_data = json.load(json_file, cls=ExperimentDecoder) num_of_qubits = 4 - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) exp_data = ExperimentData(experiment=qv_exp, backend=backend) @@ -216,7 +221,7 @@ def test_qv_success(self): successful_data = json.load(json_file, cls=ExperimentDecoder) num_of_qubits = 4 - backend = Aer.get_backend("aer_simulator") + backend = AerSimulator() qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) exp_data = ExperimentData(experiment=qv_exp, backend=backend) diff --git a/test/library/tomography/test_process_tomography.py b/test/library/tomography/test_process_tomography.py index e516e8a4d6..991caf35da 100644 --- a/test/library/tomography/test_process_tomography.py +++ b/test/library/tomography/test_process_tomography.py @@ -589,6 +589,8 @@ def test_qpt_conditional_meas(self): exp.analysis.set_options() if fitter: exp.analysis.set_options(fitter=fitter) + if "cvxpy" in fitter: + exp.analysis.set_options(fitter_options={"eps_abs": 3e-5}) fitdata = exp.analysis.run(expdata) states = fitdata.analysis_results("state") for state in states: diff --git a/test/visualization/mock_plotter.py b/test/visualization/mock_plotter.py index 3834eac1ba..2eeb26c862 100644 --- a/test/visualization/mock_plotter.py +++ b/test/visualization/mock_plotter.py @@ -278,5 +278,7 @@ def expected_supplementary_data_keys(cls) -> List[str]: textbox_text: Text to draw in a textbox. """ return [ + "report_text", + "supplementary_data_key", "textbox_text", ] diff --git a/test/visualization/test_iq_plotter.py b/test/visualization/test_iq_plotter.py index e39b6f6a09..b5df47d8c0 100644 --- a/test/visualization/test_iq_plotter.py +++ b/test/visualization/test_iq_plotter.py @@ -13,6 +13,7 @@ Test IQ plotter. """ +import warnings from itertools import product from test.base import QiskitExperimentsTestCase from typing import Any, Dict, List, Tuple @@ -126,7 +127,9 @@ def test_discriminator_trained(self, is_trained: bool): plotter.set_supplementary_data(discriminator=discrim) # Call figure() to generate discriminator image, if possible. - plotter.figure() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Discriminator was provided but") + plotter.figure() # Assert that MockDiscriminator.predict() was/wasn't called, depending on whether it was trained # or not. diff --git a/test/visualization/test_plotter.py b/test/visualization/test_plotter.py index e8cd29baf9..4ca71e990c 100644 --- a/test/visualization/test_plotter.py +++ b/test/visualization/test_plotter.py @@ -13,7 +13,7 @@ Test integration of plotter. 
""" -from copy import copy +from copy import deepcopy from test.base import QiskitExperimentsTestCase from .mock_drawer import MockDrawer @@ -49,7 +49,7 @@ def test_series_data_end_to_end(self): }, } unexpected_data = ["a", True, 0] - expected_series_data = copy(series_data) + expected_series_data = deepcopy(series_data) expected_series_data["seriesA"]["unexpected_data"] = unexpected_data for series, data in series_data.items(): @@ -71,7 +71,7 @@ def test_supplementary_data_end_to_end(self): expected_supplementary_data = { "report_text": "Lorem ipsum", - "another_data_key": 3e9, + "supplementary_data_key": 3e9, } plotter.set_supplementary_data(**expected_supplementary_data) diff --git a/test/visualization/test_plotter_mpldrawer.py b/test/visualization/test_plotter_mpldrawer.py index 9e795b3265..c98ac7e40a 100644 --- a/test/visualization/test_plotter_mpldrawer.py +++ b/test/visualization/test_plotter_mpldrawer.py @@ -101,7 +101,10 @@ def test_unit_scale(self, args): def test_scale(self): """Test the xscale and yscale figure options.""" plotter = MockPlotter(MplDrawer(), plotting_enabled=True) - plotter.set_figure_options(xscale="quadratic", yscale="log") + plotter.set_figure_options( + xscale="quadratic", + yscale="log", + ) plotter.figure() ax = plotter.drawer._axis