From 7edccd26e2e4d8a1aed08ebb57599663289cdc71 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Mon, 27 Nov 2023 09:49:18 -0800 Subject: [PATCH 001/197] Modifying gitignore --- .gitignore | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.gitignore b/.gitignore index 18cb8929a..8bcc07d04 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,11 @@ dask-worker-space dist build scratch/ +**/*.pickle +**/*.data +**/*.pkl +**/*.log +**/*.txt +**/*.sh +**/*.qasm + From c1f00575ca2cb4bb05875e0b300f9108494e2878 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 28 Mar 2024 08:12:40 -0400 Subject: [PATCH 002/197] Split worker into two threads --- bqskit/__init__.py | 1 + bqskit/runtime/__init__.py | 2 +- bqskit/runtime/base.py | 20 ++++- bqskit/runtime/manager.py | 8 +- bqskit/runtime/task.py | 10 ++- bqskit/runtime/worker.py | 155 +++++++++++++++++++++---------------- 6 files changed, 122 insertions(+), 74 deletions(-) diff --git a/bqskit/__init__.py b/bqskit/__init__.py index dd6a87128..da01938ba 100644 --- a/bqskit/__init__.py +++ b/bqskit/__init__.py @@ -4,6 +4,7 @@ import logging from sys import stdout as _stdout +import bqskit.runtime from .version import __version__ # noqa: F401 from .version import __version_info__ # noqa: F401 from bqskit.compiler.compile import compile diff --git a/bqskit/runtime/__init__.py b/bqskit/runtime/__init__.py index 31764cb46..3477c3572 100644 --- a/bqskit/runtime/__init__.py +++ b/bqskit/runtime/__init__.py @@ -111,7 +111,7 @@ os.environ['NUMEXPR_NUM_THREADS'] = '1' os.environ['VECLIB_MAXIMUM_THREADS'] = '1' os.environ['RUST_BACKTRACE'] = '1' - +print("SETTING THREADS TO 1") if TYPE_CHECKING: from bqskit.runtime.future import RuntimeFuture diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 17cdf2747..f46b95187 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -71,6 +71,7 @@ def has_idle_resources(self) -> bool: def send_outgoing(node: ServerBase) -> None: """Outgoing thread forwards messages as they are created.""" while True: + node.logger.debug('Waiting to send outgoing message...') outgoing = node.outgoing.get() if not node.running: @@ -80,9 +81,16 @@ def send_outgoing(node: ServerBase) -> None: # while condition. break + node.logger.debug(f'Sending message {outgoing[1].name}...') outgoing[0].send((outgoing[1], outgoing[2])) node.logger.debug(f'Sent message {outgoing[1].name}.') - node.logger.log(1, f'{outgoing[2]}\n') + + if outgoing[1] == RuntimeMessage.SUBMIT_BATCH: + node.logger.log(1, f'{len(outgoing[2])}\n') + else: + node.logger.log(1, f'{outgoing[2]}\n') + + node.outgoing.task_done() def sigint_handler(signum: int, _: FrameType | None, node: ServerBase) -> None: @@ -347,6 +355,7 @@ def run(self) -> None: try: while self.running: # Wait for messages + self.logger.debug('Waiting for messages...') events = self.sel.select() # Say that 5 times fast for key, _ in events: @@ -368,7 +377,10 @@ def run(self) -> None: continue log = f'Received message {msg.name} from {direction.name}.' 
self.logger.debug(log) - self.logger.log(1, f'{payload}\n') + if msg == RuntimeMessage.SUBMIT_BATCH: + self.logger.log(1, f'{len(payload)}\n') + else: + self.logger.log(1, f'{payload}\n') # Handle message self.handle_message(msg, direction, conn, payload) @@ -513,9 +525,10 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: """Schedule tasks between this node's employees.""" if len(tasks) == 0: return - + self.logger.info(f'Scheduling {len(tasks)} tasks with {self.num_idle_workers} idle workers.') assignments = self.assign_tasks(tasks) + # for e, assignment in sorted(zip(self.employees, assignments), key=lambda x: x[0].num_idle_workers, reverse=True): for e, assignment in zip(self.employees, assignments): num_tasks = len(assignment) @@ -528,6 +541,7 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: e.num_idle_workers -= min(num_tasks, e.num_idle_workers) self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) + self.logger.info(f'Finished scheduling {len(tasks)} tasks with now {self.num_idle_workers} idle workers.') def send_result_down(self, result: RuntimeResult) -> None: """Send the `result` to the appropriate employee.""" diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 7779f47ca..476f73c32 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -95,14 +95,16 @@ def __init__( MessageDirection.ABOVE, ) - # Case 1: spawn and manage workers + # Case 1: spawn and/or manage workers if ipports is None: if only_connect: self.connect_to_workers(num_workers, worker_port) else: + print('Spawning workers...') + print(f'Number of workers: {num_workers}') self.spawn_workers(num_workers, worker_port) - # Case 2: Connect to managers at ipports + # Case 2: Connect to detached managers at ipports else: self.connect_to_managers(ipports) @@ -122,6 +124,7 @@ def handle_message( payload: Any, ) -> None: """Process the message coming from `direction`.""" + self.logger.debug(f'Manager handling message {msg.name} from {direction.name}.') if direction == MessageDirection.ABOVE: if msg == RuntimeMessage.SUBMIT: @@ -133,6 +136,7 @@ def handle_message( rtasks = cast(List[RuntimeTask], payload) self.schedule_tasks(rtasks) self.update_upstream_idle_workers() + self.logger.debug(f'Finished handling submit batch from above.') elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index b55037a87..7983633ee 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -28,7 +28,7 @@ class RuntimeTask: def __init__( self, - fnargs: tuple[Any, Any, Any], + fnargs: tuple[Any, Any, Any], # TODO: Look into retyping this return_address: RuntimeAddress, comp_task_id: int, breadcrumbs: tuple[RuntimeAddress, ...], @@ -110,3 +110,11 @@ async def run(self) -> Any: def is_descendant_of(self, addr: RuntimeAddress) -> bool: """Return true if `addr` identifies a parent (or this) task.""" return addr == self.return_address or addr in self.breadcrumbs + + def __str__(self) -> str: + """Return a string representation of the task.""" + return f'{self.fnargs[0].__name__}' + + def __repr__(self) -> str: + """Return a string representation of the task.""" + return f'' diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 5c4b3ca18..1c92c5f27 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -14,6 +14,9 @@ from multiprocessing.connection import Client from multiprocessing.connection import Connection from 
multiprocessing.connection import wait +from threading import Thread +from queue import Queue +from queue import Empty from typing import Any from typing import Callable from typing import cast @@ -122,15 +125,50 @@ def deposit_result(self, result: RuntimeResult) -> None: self.result[slot_id] = result.result +def handle_incoming_comms(worker: Worker) -> None: + """Handle all incoming messages.""" + while True: + # Handle incomming communication + msg, payload = worker._conn.recv() + + # Process message + if msg == RuntimeMessage.SHUTDOWN: + worker._running = False + return + + elif msg == RuntimeMessage.SUBMIT: + task = cast(RuntimeTask, payload) + worker._add_task(task) + + elif msg == RuntimeMessage.SUBMIT_BATCH: + tasks = cast(List[RuntimeTask], payload) + worker._add_task(tasks.pop()) # Submit one task + worker._delayed_tasks.extend(tasks) # Delay rest + # Delayed tasks have no context and are stored (more-or-less) + # as a function pointer together with the arguments. + # When it gets started, it consumes much more memory, + # so we delay the task start until necessary (at no cost) + + elif msg == RuntimeMessage.RESULT: + result = cast(RuntimeResult, payload) + worker._handle_result(result) + + elif msg == RuntimeMessage.CANCEL: + addr = cast(RuntimeAddress, payload) + worker._handle_cancel(addr) + + class Worker: """ BQSKit Runtime's Worker. - BQSKit Runtime utilizes a single-threaded worker to accept, execute, + BQSKit Runtime utilizes a dual-threaded worker to accept, execute, pause, spawn, resume, and complete tasks in a custom event loop built with python's async await mechanisms. Each worker receives and sends tasks and results to the greater system through a single duplex - connection with a runtime server or manager. + connection with a runtime server or manager. One thread performs + work and sends outgoing messages, while the other thread handles + incoming messages. At start-up, the worker receives an ID and waits for its first task. 
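In condensed form, the two-thread split described here looks roughly like the sketch below. It is an illustrative reduction, not the patch's code: incoming_loop and main_loop stand in for handle_incoming_comms and Worker._loop, string message names replace RuntimeMessage, and a plain integer work item replaces a RuntimeTask.

from multiprocessing import Pipe
from queue import Queue
from threading import Thread

def incoming_loop(conn, ready_queue):
    # Receiver thread: block on the duplex connection and turn messages
    # into work items (or a shutdown sentinel) for the main loop.
    while True:
        msg, payload = conn.recv()
        if msg == 'SHUTDOWN':
            ready_queue.put(None)      # sentinel wakes the main loop
            return
        if msg == 'SUBMIT':
            ready_queue.put(payload)

def main_loop(conn, ready_queue):
    # Main thread: execute work and send outgoing messages directly.
    while True:
        item = ready_queue.get()       # blocks while the worker is idle
        if item is None:
            return
        conn.send(('RESULT', item * 2))

boss_end, worker_end = Pipe()
ready_items = Queue()
Thread(
    target=incoming_loop, args=(worker_end, ready_items), daemon=True,
).start()

boss_end.send(('SUBMIT', 21))
boss_end.send(('SHUTDOWN', None))
main_loop(worker_end, ready_items)
print(boss_end.recv())  # ('RESULT', 42)

The key property is that only the receiver thread ever calls recv() on the duplex connection, while the main thread both steps tasks and sends, so neither side blocks the other.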
An executing task may use the `submit` and `map` methods to spawn child @@ -178,8 +216,9 @@ def __init__(self, id: int, conn: Connection) -> None: self._id = id self._conn = conn - self._outgoing: list[tuple[RuntimeMessage, Any]] = [] - """Stores outgoing messages to be handled by the event loop.""" + # self._outgoing: list[tuple[RuntimeMessage, Any]] = [] + # self._outgoing: Queue[tuple[RuntimeMessage, Any]] = Queue() + # """Stores outgoing messages to be handled by the event loop.""" self._tasks: dict[RuntimeAddress, RuntimeTask] = {} """Tracks all started, unfinished tasks on this worker.""" @@ -187,7 +226,8 @@ def __init__(self, id: int, conn: Connection) -> None: self._delayed_tasks: list[RuntimeTask] = [] """Store all delayed tasks in LIFO order.""" - self._ready_task_ids: WorkerQueue = WorkerQueue() + # self._ready_task_ids: WorkerQueue = WorkerQueue() + self._ready_task_ids: Queue[RuntimeAddress] = Queue() """Tasks queued up for execution.""" self._cancelled_task_ids: set[RuntimeAddress] = set() @@ -208,7 +248,7 @@ def __init__(self, id: int, conn: Connection) -> None: self._cache: dict[str, Any] = {} """Local worker cache.""" - # Send out every emitted log message upstream + # Send out every client emitted log message upstream old_factory = logging.getLogRecordFactory() def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: @@ -218,11 +258,19 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: lvl = active_task.logging_level if lvl is None or lvl <= record.levelno: tid = active_task.comp_task_id - self._outgoing.append((RuntimeMessage.LOG, (tid, record))) + self._conn.send((RuntimeMessage.LOG, (tid, record))) return record logging.setLogRecordFactory(record_factory) + # Start incoming thread + self.incomming_thread = Thread( + target=handle_incoming_comms, + args=(self,), + ) + self.incomming_thread.start() + # self.logger.info('Started incoming thread.') + # Communicate that this worker is ready self._conn.send((RuntimeMessage.STARTED, self._id)) @@ -231,8 +279,8 @@ def _loop(self) -> None: self._running = True while self._running: self._try_step_next_ready_task() - self._try_idle() - self._handle_comms() + # self._try_idle() + # self._handle_comms() def _try_idle(self) -> None: """If there is nothing to do, wait until we receive a message.""" @@ -244,44 +292,12 @@ def _try_idle(self) -> None: self._conn.send((RuntimeMessage.WAITING, 1)) wait([self._conn]) - def _handle_comms(self) -> None: - """Handle all incoming and outgoing messages.""" - - # Handle outgoing communication + def _flush_outgoing_comms(self) -> None: + """Handle all outgoing messages.""" for out_msg in self._outgoing: self._conn.send(out_msg) self._outgoing.clear() - # Handle incomming communication - while self._conn.poll(): - msg, payload = self._conn.recv() - - # Process message - if msg == RuntimeMessage.SHUTDOWN: - self._running = False - return - - elif msg == RuntimeMessage.SUBMIT: - task = cast(RuntimeTask, payload) - self._add_task(task) - - elif msg == RuntimeMessage.SUBMIT_BATCH: - tasks = cast(List[RuntimeTask], payload) - self._add_task(tasks.pop()) # Submit one task - self._delayed_tasks.extend(tasks) # Delay rest - # Delayed tasks have no context and are stored (more-or-less) - # as a function pointer together with the arguments. 
- # When it gets started, it consumes much more memory, - # so we delay the task start until necessary (at no cost) - - elif msg == RuntimeMessage.RESULT: - result = cast(RuntimeResult, payload) - self._handle_result(result) - - elif msg == RuntimeMessage.CANCEL: - addr = cast(RuntimeAddress, payload) - self._handle_cancel(addr) - def _add_task(self, task: RuntimeTask) -> None: """Start a task and add it to the loop.""" self._tasks[task.return_address] = task @@ -290,8 +306,9 @@ def _add_task(self, task: RuntimeTask) -> None: def _handle_result(self, result: RuntimeResult) -> None: """Insert result into appropriate mailbox and wake waiting task.""" - mailbox_id = result.return_address.mailbox_index assert result.return_address.worker_id == self._id + + mailbox_id = result.return_address.mailbox_index if mailbox_id not in self._mailboxes: # If the mailbox has been dropped due to a cancel, ignore result return @@ -338,16 +355,23 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] - def _get_next_ready_task(self) -> RuntimeTask | None: - """Return the next ready task if one exists, otherwise None.""" + def _get_next_ready_task(self) -> RuntimeTask: + """Return the next ready task if one exists, otherwise block.""" while True: - if self._ready_task_ids.empty(): - if len(self._delayed_tasks) > 0: - self._add_task(self._delayed_tasks.pop()) - continue - return None + if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: + self._add_task(self._delayed_tasks.pop()) + continue - addr = self._ready_task_ids.get() + try: + addr = self._ready_task_ids.get_nowait() + except Empty: + # TODO: evaluate race condition here: + # If the incoming comms thread adds a task to the ready queue + # after this check, then the worker will have incorrectly + # sent a waiting message to the manager. + # TODO: consider some lock mechanism to prevent this? + self._conn.send((RuntimeMessage.WAITING, 1)) + addr = self._ready_task_ids.get() if addr in self._cancelled_task_ids or addr not in self._tasks: # When a task is cancelled on the worker it is not removed @@ -362,6 +386,7 @@ def _get_next_ready_task(self) -> RuntimeTask | None: # then discard this one too. Each breadcrumb (bcb) is a # task address (unique system-wide task id) of an ancestor # task. + # TODO: do I need to manually remove addr from self._tasks? 
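The breadcrumb check referenced in the comments above is compact enough to restate on its own. The sketch below mirrors RuntimeTask.is_descendant_of and the filtering done in Worker._handle_cancel, with a small named tuple standing in for RuntimeAddress.

from typing import NamedTuple

class Addr(NamedTuple):
    # Minimal stand-in for RuntimeAddress: (worker id, mailbox, slot).
    worker_id: int
    mailbox_index: int
    mailbox_slot: int

class Task:
    def __init__(self, return_address, breadcrumbs):
        self.return_address = return_address
        self.breadcrumbs = breadcrumbs   # addresses of every ancestor task

    def is_descendant_of(self, addr):
        # Matches RuntimeTask.is_descendant_of in task.py.
        return addr == self.return_address or addr in self.breadcrumbs

root = Addr(0, 0, 0)
child = Task(Addr(1, 5, 0), breadcrumbs=(root,))
unrelated = Task(Addr(1, 6, 0), breadcrumbs=(Addr(0, 1, 0),))

# On CANCEL(root) the worker simply filters its pending task lists,
# as _handle_cancel does for _tasks and _delayed_tasks.
pending = [child, unrelated]
pending = [t for t in pending if not t.is_descendant_of(root)]
print([t.return_address for t in pending])  # only the unrelated task survives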
continue return task @@ -370,10 +395,6 @@ def _try_step_next_ready_task(self) -> None: """Select a task to run, and advance it one step.""" task = self._get_next_ready_task() - if task is None: - # Nothing to do - return - try: self._active_task = task @@ -392,7 +413,7 @@ def _try_step_next_ready_task(self) -> None: exc_info = sys.exc_info() error_str = ''.join(traceback.format_exception(*exc_info)) error_payload = (self._active_task.comp_task_id, error_str) - self._outgoing.append((RuntimeMessage.ERROR, error_payload)) + self._conn.send((RuntimeMessage.ERROR, error_payload)) finally: self._active_task = None @@ -428,11 +449,11 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: if task.return_address.worker_id == self._id: self._handle_result(packaged_result) - self._outgoing.append((RuntimeMessage.UPDATE, -1)) + self._conn.send((RuntimeMessage.UPDATE, -1)) # Let manager know this worker has one less task # without sending a result else: - self._outgoing.append((RuntimeMessage.RESULT, packaged_result)) + self._conn.send((RuntimeMessage.RESULT, packaged_result)) # Remove task self._tasks.pop(task.return_address) @@ -448,10 +469,6 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: # Otherwise send a cancel message self.cancel(RuntimeFuture(mailbox_id)) - # Start delayed task - if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: - self._add_task(self._delayed_tasks.pop()) - def _get_desired_result(self, task: RuntimeTask) -> Any: """Retrieve the task's desired result from the mailboxes.""" if task.desired_box_id is None: @@ -501,7 +518,7 @@ def submit( ) # Submit the task (on the next cycle) - self._outgoing.append((RuntimeMessage.SUBMIT, task)) + self._conn.send((RuntimeMessage.SUBMIT, task)) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -548,7 +565,7 @@ def map( ] # Submit the tasks - self._outgoing.append((RuntimeMessage.SUBMIT_BATCH, tasks)) + self._conn.send((RuntimeMessage.SUBMIT_BATCH, tasks)) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -563,8 +580,8 @@ def cancel(self, future: RuntimeFuture) -> None: RuntimeAddress(self._id, future.mailbox_id, slot_id) for slot_id in range(num_slots) ] - msgs = [(RuntimeMessage.CANCEL, addr) for addr in addrs] - self._outgoing.extend(msgs) + for addr in addrs: + self._conn.send((RuntimeMessage.CANCEL, addr)) def get_cache(self) -> dict[str, Any]: """ @@ -618,11 +635,13 @@ def start_worker(w_id: int | None, port: int, cpu: int | None = None) -> None: logger.handlers.clear() logging.Logger.manager.loggerDict = {} + # Pin worker to cpu if cpu is not None: if sys.platform == 'win32': raise RuntimeError('Cannot pin worker to cpu on windows.') os.sched_setaffinity(0, [cpu]) + # Connect to manager max_retries = 7 wait_time = .1 conn: Connection | None = None @@ -639,10 +658,12 @@ def start_worker(w_id: int | None, port: int, cpu: int | None = None) -> None: if conn is None: raise RuntimeError('Unable to establish connection with manager.') + # If id isn't provided, wait for assignment if w_id is None: msg, w_id = conn.recv() assert msg == RuntimeMessage.STARTED + # Build and start worker global _worker _worker = Worker(w_id, conn) _worker._loop() From 4156b8cb757bb8e6463ba4239780efb9c5638061 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 28 Mar 2024 17:47:00 -0400 Subject: [PATCH 003/197] Removed race condition in idle tracking --- bqskit/runtime/base.py | 63 ++++++++++++++++++++++++++------------ bqskit/runtime/detached.py 
| 4 +-- bqskit/runtime/manager.py | 25 ++++++++------- bqskit/runtime/task.py | 10 +++++- bqskit/runtime/worker.py | 26 ++++++++++++---- 5 files changed, 89 insertions(+), 39 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index f46b95187..3e6bd47b2 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -41,15 +41,21 @@ def __init__( conn: Connection, total_workers: int, process: Process | None = None, - num_tasks: int = 0, ) -> None: """Construct an employee with all resources idle.""" self.conn: Connection = conn self.total_workers = total_workers self.process = process - self.num_tasks = num_tasks + self.num_tasks = 0 self.num_idle_workers = total_workers + self.submit_cache: list[tuple[RuntimeAddress, int]] = [] + """ + Tracks recently submitted tasks by id and count. + This is used to adjust the idle worker count when + the employee sends a waiting message. + """ + def shutdown(self) -> None: """Shutdown the employee.""" try: @@ -67,11 +73,25 @@ def shutdown(self) -> None: def has_idle_resources(self) -> bool: return self.num_idle_workers > 0 + def get_num_of_tasks_sent_since( + self, + read_receipt: RuntimeAddress | None, + ) -> int: + """Return the number of tasks sent since the read receipt.""" + if read_receipt is None: + return sum(count for _, count in self.submit_cache) + + for i, (addr, _) in enumerate(self.submit_cache): + if addr == read_receipt: + self.submit_cache = self.submit_cache[:i] + return sum(count for _, count in self.submit_cache[1:]) + + raise RuntimeError('Read receipt not found in submit cache.') + def send_outgoing(node: ServerBase) -> None: """Outgoing thread forwards messages as they are created.""" while True: - node.logger.debug('Waiting to send outgoing message...') outgoing = node.outgoing.get() if not node.running: @@ -81,7 +101,6 @@ def send_outgoing(node: ServerBase) -> None: # while condition. break - node.logger.debug(f'Sending message {outgoing[1].name}...') outgoing[0].send((outgoing[1], outgoing[2])) node.logger.debug(f'Sent message {outgoing[1].name}.') @@ -525,7 +544,6 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: """Schedule tasks between this node's employees.""" if len(tasks) == 0: return - self.logger.info(f'Scheduling {len(tasks)} tasks with {self.num_idle_workers} idle workers.') assignments = self.assign_tasks(tasks) # for e, assignment in sorted(zip(self.employees, assignments), key=lambda x: x[0].num_idle_workers, reverse=True): @@ -539,9 +557,9 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: e.num_tasks += num_tasks e.num_idle_workers -= min(num_tasks, e.num_idle_workers) + e.submit_cache.append((assignment[0].unique_id, num_tasks)) self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) - self.logger.info(f'Finished scheduling {len(tasks)} tasks with now {self.num_idle_workers} idle workers.') def send_result_down(self, result: RuntimeResult) -> None: """Send the `result` to the appropriate employee.""" @@ -568,23 +586,30 @@ def broadcast_cancel(self, addr: RuntimeAddress) -> None: for employee in self.employees: self.outgoing.put((employee.conn, RuntimeMessage.CANCEL, addr)) - def handle_waiting(self, conn: Connection, new_idle_count: int) -> None: + def handle_waiting( + self, + conn: Connection, + new_idle_count: int, + read_receipt: RuntimeAddress | None, + ) -> None: """ Record that an employee is idle with nothing to do. - There is a race condition here that is allowed. 
If an employee - sends a waiting message at the same time that this sends it a - task, it will still be marked waiting even though it is running - a task. We allow this for two reasons. First, the consequences are - minimal: this situation can only lead to one extra task assigned - to the worker that could otherwise go to a truly idle worker. - Second, it is unlikely in the common BQSKit workflows, which have - wide and shallow task graphs and each leaf task can require seconds - of runtime. + There is a race condition that is corrected here. If an employee + sends a waiting message at the same time that its boss sends it a + task, the boss's idle count will eventually be incorrect. To fix + this, every waiting message sent by an employee is accompanied by + a read receipt of the latest batch of tasks it has processed. The + boss can then adjust the idle count by the number of tasks sent + since the read receipt. """ - old_count = self.conn_to_employee_dict[conn].num_idle_workers - self.conn_to_employee_dict[conn].num_idle_workers = new_idle_count - self.num_idle_workers += (new_idle_count - old_count) + employee = self.conn_to_employee_dict[conn] + unaccounted_task = employee.get_num_of_tasks_sent_since(read_receipt) + adjusted_idle_count = max(new_idle_count - unaccounted_task, 0) + + old_count = employee.num_idle_workers + employee.num_idle_workers = adjusted_idle_count + self.num_idle_workers += (adjusted_idle_count - old_count) assert 0 <= self.num_idle_workers <= self.total_workers diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 3b817ef1a..878fb4847 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -182,8 +182,8 @@ def handle_message( self.handle_shutdown() elif msg == RuntimeMessage.WAITING: - num_idle = cast(int, payload) - self.handle_waiting(conn, num_idle) + num_idle, read_receipt = cast(int, payload) + self.handle_waiting(conn, num_idle, read_receipt) elif msg == RuntimeMessage.UPDATE: task_diff = cast(int, payload) diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 476f73c32..0e40a052f 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -100,8 +100,6 @@ def __init__( if only_connect: self.connect_to_workers(num_workers, worker_port) else: - print('Spawning workers...') - print(f'Number of workers: {num_workers}') self.spawn_workers(num_workers, worker_port) # Case 2: Connect to detached managers at ipports @@ -111,6 +109,9 @@ def __init__( # Track info on sent messages to reduce redundant messages: self.last_num_idle_sent_up = self.total_workers + # Track info on received messages to report read receipts: + self.most_recent_read_submit: RuntimeAddress | None = None + # Inform upstream we are starting msg = (self.upstream, RuntimeMessage.STARTED, self.total_workers) self.outgoing.put(msg) @@ -124,19 +125,19 @@ def handle_message( payload: Any, ) -> None: """Process the message coming from `direction`.""" - self.logger.debug(f'Manager handling message {msg.name} from {direction.name}.') if direction == MessageDirection.ABOVE: if msg == RuntimeMessage.SUBMIT: rtask = cast(RuntimeTask, payload) + self.most_recent_read_submit = rtask.unique_id self.schedule_tasks([rtask]) - self.update_upstream_idle_workers() + # self.update_upstream_idle_workers() elif msg == RuntimeMessage.SUBMIT_BATCH: rtasks = cast(List[RuntimeTask], payload) + self.most_recent_read_submit = rtasks[0].unique_id self.schedule_tasks(rtasks) - self.update_upstream_idle_workers() - self.logger.debug(f'Finished handling 
submit batch from above.') + # self.update_upstream_idle_workers() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) @@ -157,20 +158,20 @@ def handle_message( if msg == RuntimeMessage.SUBMIT: rtask = cast(RuntimeTask, payload) self.send_up_or_schedule_tasks([rtask]) - self.update_upstream_idle_workers() + # self.update_upstream_idle_workers() elif msg == RuntimeMessage.SUBMIT_BATCH: rtasks = cast(List[RuntimeTask], payload) self.send_up_or_schedule_tasks(rtasks) - self.update_upstream_idle_workers() + # self.update_upstream_idle_workers() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) self.handle_result_from_below(result) elif msg == RuntimeMessage.WAITING: - num_idle = cast(int, payload) - self.handle_waiting(conn, num_idle) + num_idle, read_receipt = cast(int, payload) + self.handle_waiting(conn, num_idle, read_receipt) self.update_upstream_idle_workers() elif msg == RuntimeMessage.UPDATE: @@ -221,6 +222,7 @@ def send_up_or_schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: if num_idle != 0: self.outgoing.put((self.upstream, RuntimeMessage.UPDATE, num_idle)) self.schedule_tasks(tasks[:num_idle]) + self.update_upstream_idle_workers() if len(tasks) > num_idle: self.outgoing.put(( @@ -248,7 +250,8 @@ def update_upstream_idle_workers(self) -> None: """Update the total number of idle workers upstream.""" if self.num_idle_workers != self.last_num_idle_sent_up: self.last_num_idle_sent_up = self.num_idle_workers - m = (self.upstream, RuntimeMessage.WAITING, self.num_idle_workers) + payload = (self.num_idle_workers, self.most_recent_read_submit) + m = (self.upstream, RuntimeMessage.WAITING, payload) self.outgoing.put(m) def handle_update(self, conn: Connection, task_diff: int) -> None: diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 7983633ee..676f5338b 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -43,7 +43,10 @@ def __init__( """Tuple of function pointer, arguments, and keyword arguments.""" self.return_address = return_address - """Where the result of this task should be sent.""" + """ + Where the result of this task should be sent. + This doubles as a unique system-wide id for the task. 
+ """ self.logging_level = logging_level """Logs with levels >= to this get emitted, if None always emit.""" @@ -97,6 +100,11 @@ def step(self, send_val: Any = None) -> Any: return to_return + @property + def unique_id(self) -> RuntimeAddress: + """Return the task's system-wide unique id.""" + return self.return_address + def start(self) -> None: """Initialize the task.""" self.coro = self.run() diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 1c92c5f27..15f02f267 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -15,6 +15,7 @@ from multiprocessing.connection import Connection from multiprocessing.connection import wait from threading import Thread +from threading import Lock from queue import Queue from queue import Empty from typing import Any @@ -137,17 +138,23 @@ def handle_incoming_comms(worker: Worker) -> None: return elif msg == RuntimeMessage.SUBMIT: + worker.read_receipt_mutex.acquire() task = cast(RuntimeTask, payload) + worker._most_recent_read_submit = task.unique_id worker._add_task(task) + worker.read_receipt_mutex.release() elif msg == RuntimeMessage.SUBMIT_BATCH: + worker.read_receipt_mutex.acquire() tasks = cast(List[RuntimeTask], payload) + worker._most_recent_read_submit = tasks[0].unique_id worker._add_task(tasks.pop()) # Submit one task worker._delayed_tasks.extend(tasks) # Delay rest # Delayed tasks have no context and are stored (more-or-less) # as a function pointer together with the arguments. # When it gets started, it consumes much more memory, # so we delay the task start until necessary (at no cost) + worker.read_receipt_mutex.release() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) @@ -248,6 +255,12 @@ def __init__(self, id: int, conn: Connection) -> None: self._cache: dict[str, Any] = {} """Local worker cache.""" + self.most_recent_read_submit: RuntimeAddress | None = None + """Tracks the most recently processed submit message from above.""" + + self.read_receipt_mutex = Lock() + """A lock to ensure waiting messages's read receipt is correct.""" + # Send out every client emitted log message upstream old_factory = logging.getLogRecordFactory() @@ -362,17 +375,18 @@ def _get_next_ready_task(self) -> RuntimeTask: self._add_task(self._delayed_tasks.pop()) continue + self.read_receipt_mutex.acquire() try: addr = self._ready_task_ids.get_nowait() except Empty: - # TODO: evaluate race condition here: - # If the incoming comms thread adds a task to the ready queue - # after this check, then the worker will have incorrectly - # sent a waiting message to the manager. - # TODO: consider some lock mechanism to prevent this? 
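The read-receipt scheme that replaces this TODO can be reduced to the bookkeeping below. It is a simplified stand-in for Employee.get_num_of_tasks_sent_since and handle_waiting from base.py: integer batch ids replace RuntimeAddress read receipts, and the [i:] slice follows the correction made later in this series. On the worker side, read_receipt_mutex makes the empty-queue check and the WAITING send atomic with respect to the incoming thread, so the receipt attached to the message matches what the worker has actually seen.

class EmployeeBookkeeping:
    # Boss-side record of one employee (a worker or a manager below us).

    def __init__(self):
        self.num_idle_workers = 0
        self.submit_cache = []   # (batch_id, num_tasks) per batch sent, oldest first

    def record_send(self, batch_id, num_tasks):
        self.submit_cache.append((batch_id, num_tasks))

    def tasks_sent_since(self, read_receipt):
        # Mirror of Employee.get_num_of_tasks_sent_since: drop every batch
        # the employee has acknowledged, count what is still in flight.
        if read_receipt is None:
            return sum(count for _, count in self.submit_cache)
        for i, (batch_id, _) in enumerate(self.submit_cache):
            if batch_id == read_receipt:
                self.submit_cache = self.submit_cache[i:]
                return sum(count for _, count in self.submit_cache[1:])
        raise RuntimeError('Read receipt not found in submit cache.')

    def handle_waiting(self, reported_idle, read_receipt):
        # Mirror of handle_waiting: discount tasks the employee had not
        # yet read when it reported how idle it was.
        in_flight = self.tasks_sent_since(read_receipt)
        self.num_idle_workers = max(reported_idle - in_flight, 0)

e = EmployeeBookkeeping()
e.record_send(batch_id=1, num_tasks=3)
e.record_send(batch_id=2, num_tasks=2)
# The employee reports "3 idle", but its receipt only covers batch 1, so
# the 2 tasks of batch 2 are still in flight and the count is adjusted.
e.handle_waiting(reported_idle=3, read_receipt=1)
print(e.num_idle_workers)  # 1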
- self._conn.send((RuntimeMessage.WAITING, 1)) + payload = (1, self.most_recent_read_submit) + self._conn.send((RuntimeMessage.WAITING, payload)) + self.read_receipt_mutex.release() addr = self._ready_task_ids.get() + if self.read_receipt_mutex.locked(): + self.read_receipt_mutex.release() + if addr in self._cancelled_task_ids or addr not in self._tasks: # When a task is cancelled on the worker it is not removed # from the ready queue because it is much cheaper to just From 7c2edc7dc3d1433a15bef904633eb3d40123688f Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 28 Mar 2024 18:03:24 -0400 Subject: [PATCH 004/197] pre-commit-ish --- bqskit/runtime/__init__.py | 2 +- bqskit/runtime/base.py | 18 ++++++++--------- bqskit/runtime/detached.py | 4 +++- bqskit/runtime/manager.py | 4 +++- bqskit/runtime/task.py | 1 + bqskit/runtime/worker.py | 41 +++++++++++++++++++------------------- 6 files changed, 37 insertions(+), 33 deletions(-) diff --git a/bqskit/runtime/__init__.py b/bqskit/runtime/__init__.py index 3477c3572..0443671b6 100644 --- a/bqskit/runtime/__init__.py +++ b/bqskit/runtime/__init__.py @@ -111,7 +111,7 @@ os.environ['NUMEXPR_NUM_THREADS'] = '1' os.environ['VECLIB_MAXIMUM_THREADS'] = '1' os.environ['RUST_BACKTRACE'] = '1' -print("SETTING THREADS TO 1") +print('SETTING THREADS TO 1') if TYPE_CHECKING: from bqskit.runtime.future import RuntimeFuture diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 3e6bd47b2..52b50deca 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -52,8 +52,9 @@ def __init__( self.submit_cache: list[tuple[RuntimeAddress, int]] = [] """ Tracks recently submitted tasks by id and count. - This is used to adjust the idle worker count when - the employee sends a waiting message. + + This is used to adjust the idle worker count when the employee sends a + waiting message. """ def shutdown(self) -> None: @@ -595,13 +596,12 @@ def handle_waiting( """ Record that an employee is idle with nothing to do. - There is a race condition that is corrected here. If an employee - sends a waiting message at the same time that its boss sends it a - task, the boss's idle count will eventually be incorrect. To fix - this, every waiting message sent by an employee is accompanied by - a read receipt of the latest batch of tasks it has processed. The - boss can then adjust the idle count by the number of tasks sent - since the read receipt. + There is a race condition that is corrected here. If an employee sends a + waiting message at the same time that its boss sends it a task, the + boss's idle count will eventually be incorrect. To fix this, every + waiting message sent by an employee is accompanied by a read receipt of + the latest batch of tasks it has processed. The boss can then adjust the + idle count by the number of tasks sent since the read receipt. 
""" employee = self.conn_to_employee_dict[conn] unaccounted_task = employee.get_num_of_tasks_sent_since(read_receipt) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 878fb4847..2bc015cc1 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -15,6 +15,7 @@ from typing import Any from typing import cast from typing import List +from typing import Optional from typing import Sequence from bqskit.compiler.status import CompilationStatus @@ -182,7 +183,8 @@ def handle_message( self.handle_shutdown() elif msg == RuntimeMessage.WAITING: - num_idle, read_receipt = cast(int, payload) + p = cast(tuple[int, Optional[RuntimeAddress]], payload) + num_idle, read_receipt = p self.handle_waiting(conn, num_idle, read_receipt) elif msg == RuntimeMessage.UPDATE: diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 0e40a052f..4e0f13c76 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -9,6 +9,7 @@ from typing import Any from typing import cast from typing import List +from typing import Optional from typing import Sequence from bqskit.runtime import default_manager_port @@ -170,7 +171,8 @@ def handle_message( self.handle_result_from_below(result) elif msg == RuntimeMessage.WAITING: - num_idle, read_receipt = cast(int, payload) + p = cast(tuple[int, Optional[RuntimeAddress]], payload) + num_idle, read_receipt = p self.handle_waiting(conn, num_idle, read_receipt) self.update_upstream_idle_workers() diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 676f5338b..c9f582804 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -45,6 +45,7 @@ def __init__( self.return_address = return_address """ Where the result of this task should be sent. + This doubles as a unique system-wide id for the task. 
""" diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 15f02f267..af7d86b9c 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -13,11 +13,10 @@ from multiprocessing import Process from multiprocessing.connection import Client from multiprocessing.connection import Connection -from multiprocessing.connection import wait -from threading import Thread -from threading import Lock -from queue import Queue from queue import Empty +from queue import Queue +from threading import Lock +from threading import Thread from typing import Any from typing import Callable from typing import cast @@ -140,14 +139,14 @@ def handle_incoming_comms(worker: Worker) -> None: elif msg == RuntimeMessage.SUBMIT: worker.read_receipt_mutex.acquire() task = cast(RuntimeTask, payload) - worker._most_recent_read_submit = task.unique_id + worker.most_recent_read_submit = task.unique_id worker._add_task(task) worker.read_receipt_mutex.release() elif msg == RuntimeMessage.SUBMIT_BATCH: worker.read_receipt_mutex.acquire() tasks = cast(List[RuntimeTask], payload) - worker._most_recent_read_submit = tasks[0].unique_id + worker.most_recent_read_submit = tasks[0].unique_id worker._add_task(tasks.pop()) # Submit one task worker._delayed_tasks.extend(tasks) # Delay rest # Delayed tasks have no context and are stored (more-or-less) @@ -295,21 +294,21 @@ def _loop(self) -> None: # self._try_idle() # self._handle_comms() - def _try_idle(self) -> None: - """If there is nothing to do, wait until we receive a message.""" - empty_outgoing = len(self._outgoing) == 0 - no_ready_tasks = self._ready_task_ids.empty() - no_delayed_tasks = len(self._delayed_tasks) == 0 - - if empty_outgoing and no_ready_tasks and no_delayed_tasks: - self._conn.send((RuntimeMessage.WAITING, 1)) - wait([self._conn]) - - def _flush_outgoing_comms(self) -> None: - """Handle all outgoing messages.""" - for out_msg in self._outgoing: - self._conn.send(out_msg) - self._outgoing.clear() + # def _try_idle(self) -> None: + # """If there is nothing to do, wait until we receive a message.""" + # empty_outgoing = len(self._outgoing) == 0 + # no_ready_tasks = self._ready_task_ids.empty() + # no_delayed_tasks = len(self._delayed_tasks) == 0 + + # if empty_outgoing and no_ready_tasks and no_delayed_tasks: + # self._conn.send((RuntimeMessage.WAITING, 1)) + # wait([self._conn]) + + # def _flush_outgoing_comms(self) -> None: + # """Handle all outgoing messages.""" + # for out_msg in self._outgoing: + # self._conn.send(out_msg) + # self._outgoing.clear() def _add_task(self, task: RuntimeTask) -> None: """Start a task and add it to the loop.""" From ead3663e5b2ac749e6b87d500e743903fd601690 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 30 Mar 2024 15:54:30 -0400 Subject: [PATCH 005/197] Runtime tests passing local --- bqskit/runtime/base.py | 3 +-- bqskit/runtime/detached.py | 1 + bqskit/runtime/future.py | 4 ++-- bqskit/runtime/worker.py | 31 +++++++++++++++++++++++++++---- tests/runtime/test_attached.py | 4 ++-- tests/runtime/test_next.py | 2 +- 6 files changed, 34 insertions(+), 11 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 52b50deca..0a1fd0a31 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -84,7 +84,7 @@ def get_num_of_tasks_sent_since( for i, (addr, _) in enumerate(self.submit_cache): if addr == read_receipt: - self.submit_cache = self.submit_cache[:i] + self.submit_cache = self.submit_cache[i:] return sum(count for _, count in self.submit_cache[1:]) raise RuntimeError('Read 
receipt not found in submit cache.') @@ -375,7 +375,6 @@ def run(self) -> None: try: while self.running: # Wait for messages - self.logger.debug('Waiting for messages...') events = self.sel.select() # Say that 5 times fast for key, _ in events: diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 2bc015cc1..d86e94cfa 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -369,6 +369,7 @@ def handle_error(self, error_payload: tuple[int, str]) -> None: tid = error_payload[0] conn = self.tasks[self.mailbox_to_task_dict[tid]][1] self.outgoing.put((conn, RuntimeMessage.ERROR, error_payload[1])) + # TODO: Broadcast cancel to all tasks with compilation task id tid def handle_log(self, log_payload: tuple[int, LogRecord]) -> None: """Forward logs to appropriate client.""" diff --git a/bqskit/runtime/future.py b/bqskit/runtime/future.py index 70f6ac2cc..ab69d5911 100644 --- a/bqskit/runtime/future.py +++ b/bqskit/runtime/future.py @@ -27,8 +27,8 @@ def __await__(self) -> Any: Informs the event loop which mailbox this is waiting on. """ - if self._next_flag: - return (yield self) + # if self._next_flag: + # return (yield self) return (yield self) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index af7d86b9c..103f9eba6 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -134,6 +134,8 @@ def handle_incoming_comms(worker: Worker) -> None: # Process message if msg == RuntimeMessage.SHUTDOWN: worker._running = False + worker._ready_task_ids.put(RuntimeAddress(-1, -1, -1)) + # TODO: Interupt main, maybe even kill it return elif msg == RuntimeMessage.SUBMIT: @@ -162,6 +164,7 @@ def handle_incoming_comms(worker: Worker) -> None: elif msg == RuntimeMessage.CANCEL: addr = cast(RuntimeAddress, payload) worker._handle_cancel(addr) + # TODO: preempt? class Worker: @@ -280,6 +283,7 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: target=handle_incoming_comms, args=(self,), ) + self.incomming_thread.daemon = True self.incomming_thread.start() # self.logger.info('Started incoming thread.') @@ -334,6 +338,7 @@ def _handle_result(self, result: RuntimeResult) -> None: if task.wake_on_next or box.ready: self._ready_task_ids.put(box.dest_addr) # Wake it + box.dest_addr = None # Prevent double wake def _handle_cancel(self, addr: RuntimeAddress) -> None: """ @@ -349,6 +354,7 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: for themselves using breadcrumbs and the original `addr` cancel message. """ + # TODO: Send update message? self._cancelled_task_ids.add(addr) # Remove all tasks that are children of `addr` from initialized tasks @@ -382,6 +388,8 @@ def _get_next_ready_task(self) -> RuntimeTask: self._conn.send((RuntimeMessage.WAITING, payload)) self.read_receipt_mutex.release() addr = self._ready_task_ids.get() + if addr == RuntimeAddress(-1, -1, -1): + return None if self.read_receipt_mutex.locked(): self.read_receipt_mutex.release() @@ -408,6 +416,9 @@ def _try_step_next_ready_task(self) -> None: """Select a task to run, and advance it one step.""" task = self._get_next_ready_task() + if task is None: + return + try: self._active_task = task @@ -451,8 +462,15 @@ def _process_await(self, task: RuntimeTask, future: RuntimeFuture) -> None: m = 'Cannot wait for next results on a complete task.' 
raise RuntimeError(m) task.wake_on_next = True - - elif box.ready: + # if future._next_flag: + # # Set from Worker.next, implies the task wants the next result + # # if box.ready: + # # m = 'Cannot wait for next results on a complete task.' + # # raise RuntimeError(m) + # task.wake_on_next = True + task.wake_on_next = future._next_flag + + if box.ready: self._ready_task_ids.put(task.return_address) def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: @@ -460,6 +478,10 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: assert task is self._active_task packaged_result = RuntimeResult(task.return_address, result, self._id) + if task.return_address not in self._tasks: + print(f'Task was cancelled: {task.return_address}, {task.fnargs[0].__name__}') + return + if task.return_address.worker_id == self._id: self._handle_result(packaged_result) self._conn.send((RuntimeMessage.UPDATE, -1)) @@ -491,7 +513,7 @@ def _get_desired_result(self, task: RuntimeTask) -> Any: if task.wake_on_next: fresh_results = box.get_new_results() - assert len(fresh_results) > 0 + # assert len(fresh_results) > 0 return fresh_results assert box.ready @@ -621,7 +643,8 @@ async def next(self, future: RuntimeFuture) -> list[tuple[int, Any]]: returned. Each result is paired with the index of its arguments in the original map call. """ - if future._done: + # if future._done: + if future.mailbox_id not in self._mailboxes: raise RuntimeError('Cannot wait on an already completed result.') future._next_flag = True diff --git a/tests/runtime/test_attached.py b/tests/runtime/test_attached.py index 0c2ecb67e..eb708ff9f 100644 --- a/tests/runtime/test_attached.py +++ b/tests/runtime/test_attached.py @@ -60,7 +60,7 @@ def test_create_workers(num_workers: int) -> None: compiler.close() -def test_one_thread_per_worker() -> None: +def test_two_thread_per_worker() -> None: # On windows we aren't sure how the threads are handeled if sys.platform == 'win32': return @@ -68,7 +68,7 @@ def test_one_thread_per_worker() -> None: compiler = Compiler(num_workers=1) assert compiler.p is not None assert len(psutil.Process(compiler.p.pid).children()) in [1, 2] - assert psutil.Process(compiler.p.pid).children()[0].num_threads() == 1 + assert psutil.Process(compiler.p.pid).children()[0].num_threads() == 2 compiler.close() diff --git a/tests/runtime/test_next.py b/tests/runtime/test_next.py index 30642a7d7..81c45da5a 100644 --- a/tests/runtime/test_next.py +++ b/tests/runtime/test_next.py @@ -29,7 +29,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: class TestNoDuplicateResultsInTwoNexts(BasePass): async def run(self, circuit: Circuit, data: PassData) -> None: - future = get_runtime().map(sleepi, [0.3, 0.4, 0.1, 0.2]) + future = get_runtime().map(sleepi, [0.3, 0.4, 0.1, 0.2, 5]) seen = [0] int_ids = await get_runtime().next(future) From 9ea8880bf074b1b64f905a29e32b374050e76969 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 09:33:01 -0400 Subject: [PATCH 006/197] Removed deprecated CompilationTask feature --- bqskit/compiler/compiler.py | 105 +++++++----------------------------- bqskit/ir/circuit.py | 5 +- 2 files changed, 21 insertions(+), 89 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 55bb50a1d..6a6d233e6 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -229,20 +229,18 @@ def __del__(self) -> None: def submit( self, - task_or_circuit: CompilationTask | Circuit, - workflow: WorkflowLike | None = 
None, + circuit: Circuit, + workflow: WorkflowLike, request_data: bool = False, logging_level: int | None = None, max_logging_depth: int = -1, + data: dict[str, Any] | None = None, ) -> uuid.UUID: """ Submit a compilation job to the Compiler. Args: - task_or_circuit (CompilationTask | Circuit): The task to compile, - or the input circuit. If a task is specified, no other - argument should be specified. If a task is not specified, - the circuit must be paired with a workflow argument. + circuit (Circuit): The input circuit to be compiled. workflow (WorkflowLike): The compilation job submitted is defined by executing this workflow on the input circuit. @@ -267,86 +265,35 @@ def submit( the result of the task. """ # Build CompilationTask - if isinstance(task_or_circuit, CompilationTask): - if workflow is not None: - raise ValueError( - 'Cannot specify workflow and task.' - ' Either specify a workflow and circuit or a task alone.', - ) - - task = task_or_circuit - - else: - if workflow is None: - m = 'Must specify workflow when providing a circuit to submit.' - raise TypeError(m) - - task = CompilationTask(task_or_circuit, Workflow(workflow)) + task = CompilationTask(circuit, Workflow(workflow)) # Set task configuration task.request_data = request_data task.logging_level = logging_level or self._discover_lowest_log_level() task.max_logging_depth = max_logging_depth + if data is not None: + task.data = data # Submit task to runtime self._send(RuntimeMessage.SUBMIT, task) return task.task_id - def status(self, task_id: CompilationTask | uuid.UUID) -> CompilationStatus: + def status(self, task_id: uuid.UUID) -> CompilationStatus: """Retrieve the status of the specified task.""" - if isinstance(task_id, CompilationTask): - warnings.warn( - 'Request a status from a CompilationTask is deprecated.\n' - ' Instead, pass a task ID to request a status.\n' - ' `compiler.submit` returns a task id, and you can get an\n' - ' ID from a task via `task.task_id`.\n' - ' This warning will turn into an error in a future update.', - DeprecationWarning, - ) - task_id = task_id.task_id - assert isinstance(task_id, uuid.UUID) - msg, payload = self._send_recv(RuntimeMessage.STATUS, task_id) if msg != RuntimeMessage.STATUS: raise RuntimeError(f'Unexpected message type: {msg}.') return payload - def result( - self, - task_id: CompilationTask | uuid.UUID, - ) -> Circuit | tuple[Circuit, PassData]: + def result(self, task_id: uuid.UUID) -> Circuit | tuple[Circuit, PassData]: """Block until the task is finished, return its result.""" - if isinstance(task_id, CompilationTask): - warnings.warn( - 'Request a result from a CompilationTask is deprecated.' - ' Instead, pass a task ID to request a result.\n' - ' `compiler.submit` returns a task id, and you can get an\n' - ' ID from a task via `task.task_id`.\n' - ' This warning will turn into an error in a future update.', - DeprecationWarning, - ) - task_id = task_id.task_id - assert isinstance(task_id, uuid.UUID) - msg, payload = self._send_recv(RuntimeMessage.REQUEST, task_id) if msg != RuntimeMessage.RESULT: raise RuntimeError(f'Unexpected message type: {msg}.') return payload - def cancel(self, task_id: CompilationTask | uuid.UUID) -> bool: + def cancel(self, task_id: uuid.UUID) -> bool: """Cancel the execution of a task in the system.""" - if isinstance(task_id, CompilationTask): - warnings.warn( - 'Cancelling a CompilationTask is deprecated. 
Instead,' - ' Instead, pass a task ID to cancel a task.\n' - ' `compiler.submit` returns a task id, and you can get an\n' - ' ID from a task via `task.task_id`.\n' - ' This warning will turn into an error in a future update.', - DeprecationWarning, - ) - task_id = task_id.task_id - assert isinstance(task_id, uuid.UUID) - msg, _ = self._send_recv(RuntimeMessage.CANCEL, task_id) if msg != RuntimeMessage.CANCEL: raise RuntimeError(f'Unexpected message type: {msg}.') @@ -355,63 +302,51 @@ def cancel(self, task_id: CompilationTask | uuid.UUID) -> bool: @overload def compile( self, - task_or_circuit: CompilationTask, - ) -> Circuit | tuple[Circuit, PassData]: - ... - - @overload - def compile( - self, - task_or_circuit: Circuit, + circuit: Circuit, workflow: WorkflowLike, request_data: Literal[False] = ..., logging_level: int | None = ..., max_logging_depth: int = ..., + data: dict[str, Any] | None = ..., ) -> Circuit: ... @overload def compile( self, - task_or_circuit: Circuit, + circuit: Circuit, workflow: WorkflowLike, request_data: Literal[True], logging_level: int | None = ..., max_logging_depth: int = ..., + data: dict[str, Any] | None = ..., ) -> tuple[Circuit, PassData]: ... @overload def compile( self, - task_or_circuit: Circuit, + circuit: Circuit, workflow: WorkflowLike, request_data: bool, logging_level: int | None = ..., max_logging_depth: int = ..., + data: dict[str, Any] | None = ..., ) -> Circuit | tuple[Circuit, PassData]: ... def compile( self, - task_or_circuit: CompilationTask | Circuit, - workflow: WorkflowLike | None = None, + circuit: Circuit, + workflow: WorkflowLike, request_data: bool = False, logging_level: int | None = None, max_logging_depth: int = -1, + data: dict[str, Any] | None = None, ) -> Circuit | tuple[Circuit, PassData]: """Submit a task, wait for its results; see :func:`submit` for more.""" - if isinstance(task_or_circuit, CompilationTask): - warnings.warn( - 'Manually constructing and compiling CompilationTasks' - ' is deprecated. Instead, call compile directly with' - ' your input circuit and workflow. 
This warning will' - ' turn into an error in a future update.', - DeprecationWarning, - ) - task_id = self.submit( - task_or_circuit, + circuit, workflow, request_data, logging_level, diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index d58b299a7..49e53cdf5 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2718,16 +2718,13 @@ def perform( """ from bqskit.compiler.compiler import Compiler from bqskit.compiler.passdata import PassData - from bqskit.compiler.task import CompilationTask pass_data = PassData(self) if data is not None: pass_data.update(data) with Compiler() as compiler: - task = CompilationTask(self, [compiler_pass]) - task.data = pass_data - task_id = compiler.submit(task) + task_id = compiler.submit(self, [compiler_pass], data=pass_data) self.become(compiler.result(task_id)) # type: ignore def instantiate( From 3dd5a245bf0c70a465c904046f19d55436830fc0 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:15:20 -0400 Subject: [PATCH 007/197] Fix bug in workflow copy constructor --- bqskit/compiler/workflow.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bqskit/compiler/workflow.py b/bqskit/compiler/workflow.py index 6134d07aa..3c0cdaf4f 100644 --- a/bqskit/compiler/workflow.py +++ b/bqskit/compiler/workflow.py @@ -39,6 +39,7 @@ def __init__(self, passes: WorkflowLike, name: str = '') -> None: """ if isinstance(passes, Workflow): self._passes: list[BasePass] = copy.deepcopy(passes._passes) + self._name = copy.deepcopy(passes._name) if name == '' else name return if isinstance(passes, BasePass): From 4261294e6fc7c678482cf38d56f1a2d4bae31808 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:18:59 -0400 Subject: [PATCH 008/197] Better circuit and workflow serialization with dill --- bqskit/compiler/workflow.py | 7 +++ bqskit/ir/circuit.py | 117 +++++++++++++++++++++++++++--------- setup.py | 1 + 3 files changed, 98 insertions(+), 27 deletions(-) diff --git a/bqskit/compiler/workflow.py b/bqskit/compiler/workflow.py index 3c0cdaf4f..65399ff87 100644 --- a/bqskit/compiler/workflow.py +++ b/bqskit/compiler/workflow.py @@ -3,6 +3,7 @@ import copy import logging +import dill from typing import Iterable from typing import Iterator from typing import overload @@ -119,5 +120,11 @@ def __getitem__(self, _key: slice, /) -> list[BasePass]: def __getitem__(self, _key: int | slice) -> BasePass | list[BasePass]: return self._passes.__getitem__(_key) + def __getstate__(self) -> bytes: + return dill.dumps(self.__dict__, recurse=True) + + def __setstate__(self, state: bytes) -> None: + self.__dict__.update(dill.loads(state)) + WorkflowLike = Union[Workflow, Iterable[BasePass], BasePass] diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 49e53cdf5..2fd69f846 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -4,6 +4,8 @@ import copy import logging import warnings +import pickle +import dill from typing import Any from typing import cast from typing import Collection @@ -1035,33 +1037,8 @@ def point( raise ValueError('No such operation exists in the circuit.') - def append(self, op: Operation) -> int: - """ - Append `op` to the end of the circuit and return its cycle index. - - Args: - op (Operation): The operation to append. - - Returns: - int: The cycle index of the appended operation. - - Raises: - ValueError: If `op` cannot be placed on the circuit due to - either an invalid location or gate radix mismatch. 
- - Notes: - Due to the circuit being represented as a matrix, - `circuit.append(op)` does not imply `op` is last in simulation - order but it implies `op` is in the last cycle of circuit. - - Examples: - >>> from bqskit.ir.gates import HGate - >>> circ = Circuit(1) - >>> op = Operation(HGate(), [0]) - >>> circ.append(op) # Appends a Hadamard gate to qudit 0. - """ - self.check_valid_operation(op) - cycle_index = self._find_available_or_append_cycle(op.location) + def _append(self, op: Operation, cycle_index: int) -> None: + """Append the operation to the circuit at the specified cycle.""" point = CircuitPoint(cycle_index, op.location[0]) prevs: dict[int, CircuitPoint | None] = {i: None for i in op.location} @@ -1096,6 +1073,34 @@ def append(self, op: Operation) -> int: self._gate_info[op.gate] = 0 self._gate_info[op.gate] += 1 + def append(self, op: Operation) -> int: + """ + Append `op` to the end of the circuit and return its cycle index. + + Args: + op (Operation): The operation to append. + + Returns: + int: The cycle index of the appended operation. + + Raises: + ValueError: If `op` cannot be placed on the circuit due to + either an invalid location or gate radix mismatch. + + Notes: + Due to the circuit being represented as a matrix, + `circuit.append(op)` does not imply `op` is last in simulation + order but it implies `op` is in the last cycle of circuit. + + Examples: + >>> from bqskit.ir.gates import HGate + >>> circ = Circuit(1) + >>> op = Operation(HGate(), [0]) + >>> circ.append(op) # Appends a Hadamard gate to qudit 0. + """ + self.check_valid_operation(op) + cycle_index = self._find_available_or_append_cycle(op.location) + self._append(op, cycle_index) return cycle_index def append_gate( @@ -3238,4 +3243,62 @@ def from_operation(op: Operation) -> Circuit: circuit.append_gate(op.gate, list(range(circuit.num_qudits)), op.params) return circuit + def __reduce__(self): + """Return the pickle state of the circuit.""" + serialized_gates = [] + gate_table = {} + for gate in self.gate_set: + gate_table[gate] = len(serialized_gates) + if gate.__class__.__module__.startswith('bqskit'): + serialized_gates.append((False, pickle.dumps(gate))) + else: + serialized_gates.append((True, dill.dumps(gate, recurse=True))) + + cycles = [] + last_cycle = -1 + for cycle, op in self.operations_with_cycles(): + + if cycle != last_cycle: + last_cycle = cycle + cycles.append([]) + + marshalled_op = ( + gate_table[op.gate], + op.location._location, + op.params + ) + cycles[-1].append(marshalled_op) + + data = ( + self.num_qudits, + self.radixes, + serialized_gates, + pickle.dumps(cycles), + ) + return (rebuild_circuit, data) + # endregion + + +def rebuild_circuit(num_qudits, radixes, serialized_gates, serialized_cycles) -> Circuit: + """Rebuild a circuit from a pickle state.""" + circuit = Circuit(num_qudits, radixes) + + gate_table = {} + for i, (is_dill, serialized_gate) in enumerate(serialized_gates): + if is_dill: + gate = dill.loads(serialized_gate) + else: + gate = pickle.loads(serialized_gate) + gate_table[i] = gate + + cycles = pickle.loads(serialized_cycles) + for i, cycle in enumerate(cycles): + circuit._append_cycle() + for marshalled_op in cycle: + gate = gate_table[marshalled_op[0]] + location = marshalled_op[1] + params = marshalled_op[2] + circuit._append(Operation(gate, location, params), i) + + return circuit diff --git a/setup.py b/setup.py index 935ba74ad..298874919 100644 --- a/setup.py +++ b/setup.py @@ -71,6 +71,7 @@ 'numpy>=1.22.0', 'scipy>=1.8.0', 'typing-extensions>=4.0.0', + 
'dill>=0.3.8' ], python_requires='>=3.8, <4', entry_points={ From 99fce350121f3b9c1631d4d9ac7918489b69e658 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:19:48 -0400 Subject: [PATCH 009/197] Faster node shutdown procedure --- bqskit/runtime/base.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 0a1fd0a31..3a7072273 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -57,19 +57,26 @@ def __init__( waiting message. """ - def shutdown(self) -> None: - """Shutdown the employee.""" + def initiate_shutdown(self) -> None: + """Instruct employee to shutdown.""" try: self.conn.send((RuntimeMessage.SHUTDOWN, None)) except Exception: pass + def complete_shutdown(self) -> None: + """Ensure employee is shutdown and clean up resources.""" if self.process is not None: self.process.join() self.process = None self.conn.close() + def shutdown(self) -> None: + """Initiate and complete shutdown.""" + self.initiate_shutdown() + self.complete_shutdown() + @property def has_idle_resources(self) -> bool: return self.num_idle_workers > 0 @@ -269,6 +276,7 @@ def spawn_workers( for i in range(num_workers): w_id = self.lower_id_bound + i procs[w_id] = Process(target=start_worker, args=(w_id, port)) + procs[w_id].daemon = True procs[w_id].start() self.logger.debug(f'Stated worker process {i}.') @@ -451,7 +459,11 @@ def handle_shutdown(self) -> None: # Instruct employees to shutdown for employee in self.employees: - employee.shutdown() + employee.initiate_shutdown() + + for employee in self.employees: + employee.complete_shutdown() + self.employees.clear() self.logger.debug('Shutdown employees.') From d4d6122570d33aaca0b114ecadb2e06fb056137b Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:32:35 -0400 Subject: [PATCH 010/197] Better and lazy de/serialization of RuntimeTasks --- bqskit/runtime/task.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index c9f582804..f36d540dc 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -3,6 +3,7 @@ import inspect import logging +import dill from typing import Any from typing import Coroutine @@ -39,7 +40,9 @@ def __init__( RuntimeTask.task_counter += 1 self.task_id = RuntimeTask.task_counter - self.fnargs = fnargs + self.serialized_fnargs = dill.dumps(fnargs) + self._fnargs = None + self._name = fnargs[0].__name__ """Tuple of function pointer, arguments, and keyword arguments.""" self.return_address = return_address @@ -76,6 +79,13 @@ def __init__( self.wake_on_next: bool = False """Set to true if this task should wake immediately on a result.""" + @property + def fnargs(self) -> tuple[Any, Any, Any]: + """Return the function pointer, arguments, and keyword arguments.""" + if self._fnargs is None: + self._fnargs = dill.loads(self.serialized_fnargs) + return self._fnargs + def step(self, send_val: Any = None) -> Any: """Execute one step of the task.""" if self.coro is None: @@ -122,8 +132,8 @@ def is_descendant_of(self, addr: RuntimeAddress) -> bool: def __str__(self) -> str: """Return a string representation of the task.""" - return f'{self.fnargs[0].__name__}' + return f'{self._name}' def __repr__(self) -> str: """Return a string representation of the task.""" - return f'' + return f'' From 66d38da0008ffa0fc7989413ae4f74e3f398cf1d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:41:09 -0400 Subject: [PATCH 011/197] 
Somewhat better error handling in worker --- bqskit/runtime/worker.py | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 103f9eba6..0b9066b90 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -129,16 +129,24 @@ def handle_incoming_comms(worker: Worker) -> None: """Handle all incoming messages.""" while True: # Handle incomming communication - msg, payload = worker._conn.recv() + try: + msg, payload = worker._conn.recv() + except Exception: + print(f'Worker {worker._id} crashed due to lost connection') + worker._running = False + worker._ready_task_ids.put(RuntimeAddress(-1, -1, -1)) + break # Process message if msg == RuntimeMessage.SHUTDOWN: + print(f'Worker {worker._id} received shutdown message') worker._running = False worker._ready_task_ids.put(RuntimeAddress(-1, -1, -1)) # TODO: Interupt main, maybe even kill it - return + break elif msg == RuntimeMessage.SUBMIT: + # print('Worker received submit message') worker.read_receipt_mutex.acquire() task = cast(RuntimeTask, payload) worker.most_recent_read_submit = task.unique_id @@ -294,7 +302,16 @@ def _loop(self) -> None: """Main worker event loop.""" self._running = True while self._running: - self._try_step_next_ready_task() + try: + self._try_step_next_ready_task() + except Exception: + self._running = False + exc_info = sys.exc_info() + error_str = ''.join(traceback.format_exception(*exc_info)) + try: + self._conn.send((RuntimeMessage.ERROR, error_str)) + except Exception: + pass # self._try_idle() # self._handle_comms() @@ -383,17 +400,20 @@ def _get_next_ready_task(self) -> RuntimeTask: self.read_receipt_mutex.acquire() try: addr = self._ready_task_ids.get_nowait() + except Empty: payload = (1, self.most_recent_read_submit) self._conn.send((RuntimeMessage.WAITING, payload)) self.read_receipt_mutex.release() addr = self._ready_task_ids.get() - if addr == RuntimeAddress(-1, -1, -1): - return None - if self.read_receipt_mutex.locked(): + else: self.read_receipt_mutex.release() + # Handle a shutdown request that occured while waiting + if not self._running: + return None + if addr in self._cancelled_task_ids or addr not in self._tasks: # When a task is cancelled on the worker it is not removed # from the ready queue because it is much cheaper to just @@ -456,12 +476,6 @@ def _process_await(self, task: RuntimeTask, future: RuntimeFuture) -> None: box.dest_addr = task.return_address task.desired_box_id = future.mailbox_id - if future._next_flag: - # Set from Worker.next, implies the task wants the next result - if box.ready: - m = 'Cannot wait for next results on a complete task.' 
- raise RuntimeError(m) - task.wake_on_next = True # if future._next_flag: # # Set from Worker.next, implies the task wants the next result # # if box.ready: From 72c073dc815d1e112da02eaa969b72955423b033 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 1 Apr 2024 10:42:31 -0400 Subject: [PATCH 012/197] Export sys.path on client connection --- bqskit/compiler/compiler.py | 3 +-- bqskit/runtime/detached.py | 16 ++++++++++++++-- bqskit/runtime/manager.py | 7 +++++++ bqskit/runtime/message.py | 1 + bqskit/runtime/worker.py | 4 ++++ 5 files changed, 27 insertions(+), 4 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 6a6d233e6..778521d00 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -9,7 +9,6 @@ import sys import time import uuid -import warnings from multiprocessing.connection import Client from multiprocessing.connection import Connection from subprocess import Popen @@ -149,7 +148,7 @@ def _connect_to_server(self, ip: str, port: int) -> None: self.old_signal = signal.signal(signal.SIGINT, handle) if self.conn is None: raise RuntimeError('Connection unexpectedly none.') - self.conn.send((RuntimeMessage.CONNECT, None)) + self.conn.send((RuntimeMessage.CONNECT, sys.path)) _logger.debug('Successfully connected to runtime server.') return raise RuntimeError('Client connection refused') diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index d86e94cfa..02d2bf25b 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -30,7 +30,6 @@ from bqskit.runtime.result import RuntimeResult from bqskit.runtime.task import RuntimeTask - def listen(server: DetachedServer, port: int) -> None: """Listening thread listens for client connections.""" listener = Listener(('0.0.0.0', port)) @@ -131,7 +130,15 @@ def handle_message( if direction == MessageDirection.CLIENT: if msg == RuntimeMessage.CONNECT: - pass + # paths, serialized_defintions = cast(List[str], payload) + paths = cast(List[str], payload) + import sys + for path in paths: + if path not in sys.path: + sys.path.append(path) + for employee in self.employees: + employee.conn.send((RuntimeMessage.IMPORTPATH, path)) + elif msg == RuntimeMessage.DISCONNECT: self.handle_disconnect(conn) @@ -370,6 +377,11 @@ def handle_error(self, error_payload: tuple[int, str]) -> None: conn = self.tasks[self.mailbox_to_task_dict[tid]][1] self.outgoing.put((conn, RuntimeMessage.ERROR, error_payload[1])) # TODO: Broadcast cancel to all tasks with compilation task id tid + # But avoid double broadcasting it. If the client crashes due to + # this error, which it may not, then we will quickly process + # a handle_disconnect and call the cancel anyways. We should + # still cancel here incase the client catches the error and + # resubmits a job. 
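
Every node in the tree handles the new import-path flow the same way: absorb any paths it has not seen, then pass the list on to its employees so workers can import client-side modules (for example, user-defined passes) when deserializing tasks. A minimal sketch of that rule, with stand-in "employees" and "send" arguments in place of the runtime's real connection objects:

from __future__ import annotations

import sys
from typing import Callable, Sequence


def absorb_and_forward_paths(
    paths: Sequence[str],
    employees: Sequence[str],
    send: Callable[[str, Sequence[str]], None],
) -> None:
    """Append unseen entries to sys.path, then pass the list on."""
    for path in paths:
        if path not in sys.path:
            sys.path.append(path)
    for employee in employees:
        send(employee, list(paths))


# Toy usage: record what would be forwarded instead of really sending it.
sent = []
absorb_and_forward_paths(
    ['/home/user/my_passes'],              # hypothetical client-side path
    employees=['manager-0', 'manager-1'],
    send=lambda dest, payload: sent.append((dest, payload)),
)
assert len(sent) == 2
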
def handle_log(self, log_payload: tuple[int, LogRecord]) -> None: """Forward logs to appropriate client.""" diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 4e0f13c76..974ddbae0 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -151,6 +151,13 @@ def handle_message( elif msg == RuntimeMessage.SHUTDOWN: self.handle_shutdown() + elif msg == RuntimeMessage.IMPORTPATH: + import_path = cast(str, payload) + import sys + sys.path.append(import_path) + for employee in self.employees: + employee.conn.send((RuntimeMessage.IMPORTPATH, import_path)) + else: raise RuntimeError(f'Unexpected message type: {msg.name}') diff --git a/bqskit/runtime/message.py b/bqskit/runtime/message.py index 63f687048..aed9f6cfa 100644 --- a/bqskit/runtime/message.py +++ b/bqskit/runtime/message.py @@ -20,3 +20,4 @@ class RuntimeMessage(IntEnum): CANCEL = 11 WAITING = 12 UPDATE = 13 + IMPORTPATH = 14 diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 0b9066b90..575d10864 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -174,6 +174,10 @@ def handle_incoming_comms(worker: Worker) -> None: worker._handle_cancel(addr) # TODO: preempt? + elif msg == RuntimeMessage.IMPORTPATH: + import_path = cast(str, payload) + sys.path.append(import_path) + class Worker: """ From 67871008cfe8f5263c9a5d3fae2a67c9d10e56cd Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 2 Apr 2024 08:29:28 -0400 Subject: [PATCH 013/197] pre-commit (ish) --- bqskit/compiler/compiler.py | 13 +++++++------ bqskit/compiler/workflow.py | 5 +++-- bqskit/ir/circuit.py | 26 +++++++++++++++++++------- bqskit/runtime/base.py | 11 +++++++---- bqskit/runtime/detached.py | 6 ++++-- bqskit/runtime/task.py | 6 ++++-- bqskit/runtime/worker.py | 12 +++++++++--- setup.py | 2 +- 8 files changed, 54 insertions(+), 27 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 778521d00..b306004dd 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -14,6 +14,7 @@ from subprocess import Popen from types import FrameType from typing import Literal +from typing import MutableMapping from typing import overload from typing import TYPE_CHECKING @@ -233,7 +234,7 @@ def submit( request_data: bool = False, logging_level: int | None = None, max_logging_depth: int = -1, - data: dict[str, Any] | None = None, + data: MutableMapping[str, Any] | None = None, ) -> uuid.UUID: """ Submit a compilation job to the Compiler. @@ -271,7 +272,7 @@ def submit( task.logging_level = logging_level or self._discover_lowest_log_level() task.max_logging_depth = max_logging_depth if data is not None: - task.data = data + task.data.update(data) # Submit task to runtime self._send(RuntimeMessage.SUBMIT, task) @@ -306,7 +307,7 @@ def compile( request_data: Literal[False] = ..., logging_level: int | None = ..., max_logging_depth: int = ..., - data: dict[str, Any] | None = ..., + data: MutableMapping[str, Any] | None = ..., ) -> Circuit: ... @@ -318,7 +319,7 @@ def compile( request_data: Literal[True], logging_level: int | None = ..., max_logging_depth: int = ..., - data: dict[str, Any] | None = ..., + data: MutableMapping[str, Any] | None = ..., ) -> tuple[Circuit, PassData]: ... @@ -330,7 +331,7 @@ def compile( request_data: bool, logging_level: int | None = ..., max_logging_depth: int = ..., - data: dict[str, Any] | None = ..., + data: MutableMapping[str, Any] | None = ..., ) -> Circuit | tuple[Circuit, PassData]: ... 
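
Two typing details in these hunks carry the weight: MutableMapping[str, Any] lets callers hand in any dict-like object (such as a PassData) rather than only a plain dict, and the Literal[...] overloads let a type checker pick the return type from the value of request_data. A small self-contained illustration of the same pattern; the "run" function below is invented for the example and is not part of the compiler API:

from __future__ import annotations

from typing import Any, Literal, MutableMapping, overload


@overload
def run(
    request_data: Literal[False] = ...,
    data: MutableMapping[str, Any] | None = ...,
) -> str: ...


@overload
def run(
    request_data: Literal[True],
    data: MutableMapping[str, Any] | None = ...,
) -> tuple[str, dict[str, Any]]: ...


def run(
    request_data: bool = False,
    data: MutableMapping[str, Any] | None = None,
) -> str | tuple[str, dict[str, Any]]:
    # The parameter accepts plain dicts or any other mutable mapping.
    extra = dict(data) if data is not None else {}
    return ('ok', extra) if request_data else 'ok'


result = run()                    # a type checker infers: str
result_and_data = run(True, {})   # a type checker infers: tuple[str, dict[str, Any]]
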
@@ -341,7 +342,7 @@ def compile( request_data: bool = False, logging_level: int | None = None, max_logging_depth: int = -1, - data: dict[str, Any] | None = None, + data: MutableMapping[str, Any] | None = None, ) -> Circuit | tuple[Circuit, PassData]: """Submit a task, wait for its results; see :func:`submit` for more.""" task_id = self.submit( diff --git a/bqskit/compiler/workflow.py b/bqskit/compiler/workflow.py index 65399ff87..9a0ad2677 100644 --- a/bqskit/compiler/workflow.py +++ b/bqskit/compiler/workflow.py @@ -3,7 +3,6 @@ import copy import logging -import dill from typing import Iterable from typing import Iterator from typing import overload @@ -11,6 +10,8 @@ from typing import TYPE_CHECKING from typing import Union +import dill + from bqskit.compiler.basepass import BasePass from bqskit.utils.random import seed_random_sources from bqskit.utils.typing import is_iterable @@ -40,7 +41,7 @@ def __init__(self, passes: WorkflowLike, name: str = '') -> None: """ if isinstance(passes, Workflow): self._passes: list[BasePass] = copy.deepcopy(passes._passes) - self._name = copy.deepcopy(passes._name) if name == '' else name + self._name: str = name if name else copy.deepcopy(passes._name) return if isinstance(passes, BasePass): diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 2fd69f846..1c628125c 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -3,10 +3,10 @@ import copy import logging -import warnings import pickle -import dill +import warnings from typing import Any +from typing import Callable from typing import cast from typing import Collection from typing import Dict @@ -20,6 +20,7 @@ from typing import Tuple from typing import TYPE_CHECKING +import dill import numpy as np import numpy.typing as npt @@ -3243,9 +3244,15 @@ def from_operation(op: Operation) -> Circuit: circuit.append_gate(op.gate, list(range(circuit.num_qudits)), op.params) return circuit - def __reduce__(self): + def __reduce__(self) -> tuple[ + Callable[ + [int, tuple[int, ...], list[tuple[bool, bytes]], bytes], + Circuit, + ], + tuple[int, tuple[int, ...], list[tuple[bool, bytes]], bytes], + ]: """Return the pickle state of the circuit.""" - serialized_gates = [] + serialized_gates: list[tuple[bool, bytes]] = [] gate_table = {} for gate in self.gate_set: gate_table[gate] = len(serialized_gates) @@ -3254,7 +3261,7 @@ def __reduce__(self): else: serialized_gates.append((True, dill.dumps(gate, recurse=True))) - cycles = [] + cycles: list[list[tuple[int, tuple[int, ...], list[float]]]] = [] last_cycle = -1 for cycle, op in self.operations_with_cycles(): @@ -3265,7 +3272,7 @@ def __reduce__(self): marshalled_op = ( gate_table[op.gate], op.location._location, - op.params + op.params, ) cycles[-1].append(marshalled_op) @@ -3280,7 +3287,12 @@ def __reduce__(self): # endregion -def rebuild_circuit(num_qudits, radixes, serialized_gates, serialized_cycles) -> Circuit: +def rebuild_circuit( + num_qudits: int, + radixes: tuple[int, ...], + serialized_gates: list[tuple[bool, bytes]], + serialized_cycles: bytes, +) -> Circuit: """Rebuild a circuit from a pickle state.""" circuit = Circuit(num_qudits, radixes) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 3a7072273..f4897b5ec 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -556,10 +556,13 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: """Schedule tasks between this node's employees.""" if len(tasks) == 0: return - assignments = self.assign_tasks(tasks) - - # for e, assignment in 
sorted(zip(self.employees, assignments), key=lambda x: x[0].num_idle_workers, reverse=True): - for e, assignment in zip(self.employees, assignments): + assignments = zip(self.employees, self.assign_tasks(tasks)) + sorted_assignments = sorted( + assignments, + key=lambda x: x[0].num_idle_workers, + reverse=True, + ) + for e, assignment in sorted_assignments: num_tasks = len(assignment) if num_tasks == 0: diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 02d2bf25b..1c0a9fdf6 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -30,6 +30,7 @@ from bqskit.runtime.result import RuntimeResult from bqskit.runtime.task import RuntimeTask + def listen(server: DetachedServer, port: int) -> None: """Listening thread listens for client connections.""" listener = Listener(('0.0.0.0', port)) @@ -137,8 +138,9 @@ def handle_message( if path not in sys.path: sys.path.append(path) for employee in self.employees: - employee.conn.send((RuntimeMessage.IMPORTPATH, path)) - + employee.conn.send( + (RuntimeMessage.IMPORTPATH, path), + ) elif msg == RuntimeMessage.DISCONNECT: self.handle_disconnect(conn) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index f36d540dc..ccffa3b9a 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -3,10 +3,11 @@ import inspect import logging -import dill from typing import Any from typing import Coroutine +import dill + from bqskit.runtime.address import RuntimeAddress @@ -41,7 +42,7 @@ def __init__( self.task_id = RuntimeTask.task_counter self.serialized_fnargs = dill.dumps(fnargs) - self._fnargs = None + self._fnargs: tuple[Any, Any, Any] | None = None self._name = fnargs[0].__name__ """Tuple of function pointer, arguments, and keyword arguments.""" @@ -84,6 +85,7 @@ def fnargs(self) -> tuple[Any, Any, Any]: """Return the function pointer, arguments, and keyword arguments.""" if self._fnargs is None: self._fnargs = dill.loads(self.serialized_fnargs) + assert self._fnargs is not None # for type checker return self._fnargs def step(self, send_val: Any = None) -> Any: diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 575d10864..7c4c8434d 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -358,8 +358,11 @@ def _handle_result(self, result: RuntimeResult) -> None: task = self._tasks[box.dest_addr] if task.wake_on_next or box.ready: + # print(f'Worker {self._id} is waking task + # {task.return_address}, with {task.wake_on_next=}, + # {box.ready=}') self._ready_task_ids.put(box.dest_addr) # Wake it - box.dest_addr = None # Prevent double wake + box.dest_addr = None # Prevent double wake def _handle_cancel(self, addr: RuntimeAddress) -> None: """ @@ -394,7 +397,7 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] - def _get_next_ready_task(self) -> RuntimeTask: + def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise block.""" while True: if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: @@ -487,6 +490,8 @@ def _process_await(self, task: RuntimeTask, future: RuntimeFuture) -> None: # # raise RuntimeError(m) # task.wake_on_next = True task.wake_on_next = future._next_flag + # print(f'Worker {self._id} is waiting on task + # {task.return_address}, with {task.wake_on_next=}') if box.ready: self._ready_task_ids.put(task.return_address) @@ -497,7 +502,8 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: packaged_result = 
RuntimeResult(task.return_address, result, self._id) if task.return_address not in self._tasks: - print(f'Task was cancelled: {task.return_address}, {task.fnargs[0].__name__}') + # print(f'Task was cancelled: {task.return_address}, + # {task.fnargs[0].__name__}') return if task.return_address.worker_id == self._id: diff --git a/setup.py b/setup.py index 298874919..a546d6267 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ 'numpy>=1.22.0', 'scipy>=1.8.0', 'typing-extensions>=4.0.0', - 'dill>=0.3.8' + 'dill>=0.3.8', ], python_requires='>=3.8, <4', entry_points={ From 886a034b28851bb154787e5fbb67b298033f70df Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 2 Apr 2024 10:50:06 -0400 Subject: [PATCH 014/197] Avoid unnecessary circuit evals in data update --- bqskit/compiler/passdata.py | 18 ++++++++++++++++++ bqskit/ir/circuit.py | 7 +------ tests/compiler/test_data.py | 6 ++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/bqskit/compiler/passdata.py b/bqskit/compiler/passdata.py index 160d4f44e..23d584712 100644 --- a/bqskit/compiler/passdata.py +++ b/bqskit/compiler/passdata.py @@ -252,6 +252,24 @@ def __contains__(self, _o: object) -> bool: in_data = self._data.__contains__(_o) return in_resv or in_data + def update(self, other: Any = (), /, **kwds: Any) -> None: + """Update the data with key-values pairs from `other` and `kwds`.""" + if isinstance(other, PassData): + for key in other: + # Handle target specially to avoid circuit evaluation + if key == 'target': + self._target = other._target + continue + + self[key] = other[key] + + for key, value in kwds.items(): + self[key] = value + + return + + super().update(other, **kwds) + def copy(self) -> PassData: """Returns a deep copy of the data.""" return copy.deepcopy(self) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 1c628125c..2f1e8758b 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2723,14 +2723,9 @@ def perform( :class:`~bqskit.compiler.compiler.Compiler` directly. 
""" from bqskit.compiler.compiler import Compiler - from bqskit.compiler.passdata import PassData - - pass_data = PassData(self) - if data is not None: - pass_data.update(data) with Compiler() as compiler: - task_id = compiler.submit(self, [compiler_pass], data=pass_data) + task_id = compiler.submit(self, [compiler_pass], data=data) self.become(compiler.result(task_id)) # type: ignore def instantiate( diff --git a/tests/compiler/test_data.py b/tests/compiler/test_data.py index 075a95c82..934215db6 100644 --- a/tests/compiler/test_data.py +++ b/tests/compiler/test_data.py @@ -26,3 +26,9 @@ def test_update_error_mul() -> None: assert data.error == 0.75 data.update_error_mul(0.5) assert data.error == 0.875 + + +def test_target_doesnt_get_expanded_on_update() -> None: + data = PassData(Circuit(64)) + data2 = PassData(Circuit(64)) + data.update(data2) # Should not crash From 9942a0a57e2e3a8298dfac71e70e789320e4a7c1 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 2 Apr 2024 15:46:02 -0400 Subject: [PATCH 015/197] Mock dill for docs --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index cfd238319..f48262b93 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -100,7 +100,7 @@ 'pytket', 'cirq', 'qutip', - 'qiskit', + 'dill', ] nbsphinx_allow_errors = True nbsphinx_execute = 'never' From 50f3ad8191e9a8f9c14b5c17d3c92af95bf32120 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 2 Apr 2024 15:50:02 -0400 Subject: [PATCH 016/197] Fixed python 3.8 type issue --- bqskit/runtime/detached.py | 3 ++- bqskit/runtime/manager.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 1c0a9fdf6..607af21ed 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -17,6 +17,7 @@ from typing import List from typing import Optional from typing import Sequence +from typing import Tuple from bqskit.compiler.status import CompilationStatus from bqskit.compiler.task import CompilationTask @@ -192,7 +193,7 @@ def handle_message( self.handle_shutdown() elif msg == RuntimeMessage.WAITING: - p = cast(tuple[int, Optional[RuntimeAddress]], payload) + p = cast(Tuple[int, Optional[RuntimeAddress]], payload) num_idle, read_receipt = p self.handle_waiting(conn, num_idle, read_receipt) diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 974ddbae0..c8725bf1c 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -11,6 +11,7 @@ from typing import List from typing import Optional from typing import Sequence +from typing import Tuple from bqskit.runtime import default_manager_port from bqskit.runtime import default_worker_port @@ -178,7 +179,7 @@ def handle_message( self.handle_result_from_below(result) elif msg == RuntimeMessage.WAITING: - p = cast(tuple[int, Optional[RuntimeAddress]], payload) + p = cast(Tuple[int, Optional[RuntimeAddress]], payload) num_idle, read_receipt = p self.handle_waiting(conn, num_idle, read_receipt) self.update_upstream_idle_workers() From a47be9476a6e689eaaeb5d2dd848a5ebf30f3485 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 2 Apr 2024 16:29:15 -0400 Subject: [PATCH 017/197] Test mac through CI :( --- tests/runtime/test_attached.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/runtime/test_attached.py b/tests/runtime/test_attached.py index eb708ff9f..3996007fe 100644 --- a/tests/runtime/test_attached.py +++ b/tests/runtime/test_attached.py @@ -68,6 +68,8 @@ def test_two_thread_per_worker() -> 
None: compiler = Compiler(num_workers=1) assert compiler.p is not None assert len(psutil.Process(compiler.p.pid).children()) in [1, 2] + if sys.platform == 'darwin': + print(psutil.Process(compiler.p.pid).children()[0].threads()) assert psutil.Process(compiler.p.pid).children()[0].num_threads() == 2 compiler.close() From 363c7e5818cac9ed341b6abf3c1b29ae922621e1 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 07:22:24 -0400 Subject: [PATCH 018/197] Lazy imports in __init__ for runtime thread ctrl --- bqskit/__init__.py | 100 +++++------------------------ bqskit/_logging.py | 36 +++++++++++ bqskit/{version.py => _version.py} | 0 bqskit/compiler/compile.py | 7 +- bqskit/ir/__init__.py | 7 ++ setup.py | 2 +- 6 files changed, 64 insertions(+), 88 deletions(-) create mode 100644 bqskit/_logging.py rename bqskit/{version.py => _version.py} (100%) diff --git a/bqskit/__init__.py b/bqskit/__init__.py index da01938ba..83ca808bb 100644 --- a/bqskit/__init__.py +++ b/bqskit/__init__.py @@ -1,92 +1,29 @@ """The Berkeley Quantum Synthesis Toolkit Python Package.""" from __future__ import annotations -import logging -from sys import stdout as _stdout +from typing import Any -import bqskit.runtime -from .version import __version__ # noqa: F401 -from .version import __version_info__ # noqa: F401 -from bqskit.compiler.compile import compile -from bqskit.compiler.machine import MachineModel -from bqskit.ir.circuit import Circuit -from bqskit.ir.lang import register_language as _register_language -from bqskit.ir.lang.qasm2 import OPENQASM2Language as _qasm +from bqskit._logging import disable_logging +from bqskit._logging import enable_logging +from bqskit._version import __version__ # noqa: F401 +from bqskit._version import __version_info__ # noqa: F401 -# Initialize Logging -_logging_initialized = False +def __getattr__(name: str) -> Any: + # Lazy imports + if name == 'compile': + from bqskit.compiler.compile import compile + return compile -def enable_logging(verbose: bool = False) -> None: - """ - Enable logging for BQSKit. + if name == 'Circuit': + from bqskit.ir.circuit import Circuit + return Circuit - Args: - verbose (bool): If set to True, will print more verbose messages. - Defaults to False. - """ - global _logging_initialized - if not _logging_initialized: - _logger = logging.getLogger('bqskit') - _handler = logging.StreamHandler(_stdout) - _handler.setLevel(0) - _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' - _fmt_message = ' %(name)s: %(message)s' - _fmt = _fmt_header + _fmt_message - _formatter = logging.Formatter(_fmt, '%H:%M:%S') - _handler.setFormatter(_formatter) - _logger.addHandler(_handler) - _logging_initialized = True + if name == 'MachineModel': + from bqskit.compiler.machine import MachineModel + return MachineModel - level = logging.DEBUG if verbose else logging.INFO - logging.getLogger('bqskit').setLevel(level) - - -def disable_logging() -> None: - """Disable logging for BQSKit.""" - logging.getLogger('bqskit').setLevel(logging.CRITICAL) - - -def enable_dashboard() -> None: - import warnings - warnings.warn( - 'Dask has been removed from BQSKit. As a result, the' - ' enable_dashboard method has been removed.' - 'This warning will turn into an error in a future update.', - DeprecationWarning, - ) - - -def disable_dashboard() -> None: - import warnings - warnings.warn( - 'Dask has been removed from BQSKit. As a result, the' - ' disable_dashboard method has been removed.' 
- 'This warning will turn into an error in a future update.', - DeprecationWarning, - ) - - -def disable_parallelism() -> None: - import warnings - warnings.warn( - 'The disable_parallelism method has been removed.' - ' Instead, set the "num_workers" parameter to 1 during ' - 'Compiler construction. This warning will turn into' - 'an error in a future update.', - DeprecationWarning, - ) - - -def enable_parallelism() -> None: - import warnings - warnings.warn( - 'The enable_parallelism method has been removed.' - ' Instead, set the "num_workers" parameter to 1 during ' - 'Compiler construction. This warning will turn into' - 'an error in a future update.', - DeprecationWarning, - ) + raise AttributeError(f'module {__name__} has no attribute {name}') __all__ = [ @@ -96,6 +33,3 @@ def enable_parallelism() -> None: 'enable_logging', 'disable_logging', ] - -# Register supported languages -_register_language('qasm', _qasm()) diff --git a/bqskit/_logging.py b/bqskit/_logging.py new file mode 100644 index 000000000..73ec1bef8 --- /dev/null +++ b/bqskit/_logging.py @@ -0,0 +1,36 @@ +"""This module contains the logging configuration and methods for BQSKit.""" +import logging +from sys import stdout as _stdout + + +_logging_initialized = False + + +def enable_logging(verbose: bool = False) -> None: + """ + Enable logging for BQSKit. + + Args: + verbose (bool): If set to True, will print more verbose messages. + Defaults to False. + """ + global _logging_initialized + if not _logging_initialized: + _logger = logging.getLogger('bqskit') + _handler = logging.StreamHandler(_stdout) + _handler.setLevel(0) + _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' + _fmt_message = ' %(name)s: %(message)s' + _fmt = _fmt_header + _fmt_message + _formatter = logging.Formatter(_fmt, '%H:%M:%S') + _handler.setFormatter(_formatter) + _logger.addHandler(_handler) + _logging_initialized = True + + level = logging.DEBUG if verbose else logging.INFO + logging.getLogger('bqskit').setLevel(level) + + +def disable_logging() -> None: + """Disable logging for BQSKit.""" + logging.getLogger('bqskit').setLevel(logging.CRITICAL) diff --git a/bqskit/version.py b/bqskit/_version.py similarity index 100% rename from bqskit/version.py rename to bqskit/_version.py diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 87db9db93..e8d4aa90d 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -2,6 +2,7 @@ from __future__ import annotations import logging +import math import warnings from typing import Any from typing import Literal @@ -10,8 +11,6 @@ from typing import TYPE_CHECKING from typing import Union -import numpy as np - from bqskit.compiler.compiler import Compiler from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData @@ -582,7 +581,7 @@ def type_and_check_input(input: CompilationInputLike) -> CompilationInput: if error_threshold is not None: for i, data in enumerate(datas): error = data.error - nonsq_error = 1 - np.sqrt(max(1 - (error * error), 0)) + nonsq_error = 1 - math.sqrt(max(1 - (error * error), 0)) if nonsq_error > error_threshold: warnings.warn( 'Upper bound on error is greater than set threshold:' @@ -631,7 +630,7 @@ def type_and_check_input(input: CompilationInputLike) -> CompilationInput: # Log error if necessary if error_threshold is not None: error = data.error - nonsq_error = 1 - np.sqrt(max(1 - (error * error), 0)) + nonsq_error = 1 - math.sqrt(max(1 - (error * error), 0)) if nonsq_error > error_threshold: warnings.warn( 
'Upper bound on error is greater than set threshold:' diff --git a/bqskit/ir/__init__.py b/bqskit/ir/__init__.py index 10d0e4342..9959e3f04 100644 --- a/bqskit/ir/__init__.py +++ b/bqskit/ir/__init__.py @@ -62,6 +62,8 @@ from bqskit.ir.interval import CycleInterval from bqskit.ir.interval import IntervalLike from bqskit.ir.iterator import CircuitIterator +from bqskit.ir.lang import register_language as _register_language +from bqskit.ir.lang.qasm2 import OPENQASM2Language as _qasm from bqskit.ir.location import CircuitLocation from bqskit.ir.location import CircuitLocationLike from bqskit.ir.operation import Operation @@ -71,6 +73,11 @@ from bqskit.ir.region import CircuitRegionLike from bqskit.ir.structure import CircuitStructure + +# Register supported languages +_register_language('qasm', _qasm()) + + __all__ = [ 'Operation', 'Circuit', diff --git a/setup.py b/setup.py index a546d6267..0c02a9dec 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ root_dir_path = os.path.abspath(os.path.dirname(__file__)) pkg_dir_path = os.path.join(root_dir_path, 'bqskit') readme_path = os.path.join(root_dir_path, 'README.md') -version_path = os.path.join(pkg_dir_path, 'version.py') +version_path = os.path.join(pkg_dir_path, '_version.py') # Load Version Number with open(version_path) as version_file: From 011142a7bacacaf545781974b9400b19899539e5 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 07:26:33 -0400 Subject: [PATCH 019/197] Skip thread counting test on macOS --- tests/runtime/test_attached.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/runtime/test_attached.py b/tests/runtime/test_attached.py index 3996007fe..db70ec8bc 100644 --- a/tests/runtime/test_attached.py +++ b/tests/runtime/test_attached.py @@ -61,15 +61,15 @@ def test_create_workers(num_workers: int) -> None: def test_two_thread_per_worker() -> None: - # On windows we aren't sure how the threads are handeled if sys.platform == 'win32': - return + pytest.skip('Not sure how to count threads on Windows.') + + if sys.platform == 'darwin': + pytest.skip('MacOS requires permissions to count threads.') compiler = Compiler(num_workers=1) assert compiler.p is not None assert len(psutil.Process(compiler.p.pid).children()) in [1, 2] - if sys.platform == 'darwin': - print(psutil.Process(compiler.p.pid).children()[0].threads()) assert psutil.Process(compiler.p.pid).children()[0].num_threads() == 2 compiler.close() From 7571d0118ab320e8a4d4ab8da4ec76529b104efa Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 07:27:39 -0400 Subject: [PATCH 020/197] pre-commit --- bqskit/_logging.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bqskit/_logging.py b/bqskit/_logging.py index 73ec1bef8..59b079156 100644 --- a/bqskit/_logging.py +++ b/bqskit/_logging.py @@ -1,4 +1,6 @@ """This module contains the logging configuration and methods for BQSKit.""" +from __future__ import annotations + import logging from sys import stdout as _stdout From d4ff91726f274961be68799b0be4a183f8d3e8e0 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 08:31:41 -0400 Subject: [PATCH 021/197] Addresses #211 --- bqskit/compiler/compiler.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index b306004dd..00f2b54ab 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -109,7 +109,7 @@ def __init__( ip = 'localhost' self._start_server(num_workers, runtime_log_level, worker_port) - 
self._connect_to_server(ip, port) + self._connect_to_server(ip, port, self.p is not None) def _start_server( self, @@ -132,17 +132,36 @@ def _start_server( self.p = Popen([sys.executable, '-c', launch_str], creationflags=flags) _logger.debug('Starting runtime server process.') - def _connect_to_server(self, ip: str, port: int) -> None: + def _connect_to_server(self, ip: str, port: int, attached: bool) -> None: """Connect to a runtime server at `ip` and `port`.""" max_retries = 8 wait_time = .25 - for _ in range(max_retries): + current_retry = 0 + while current_retry < max_retries or attached: try: family = 'AF_INET' if sys.platform == 'win32' else None conn = Client((ip, port), family) except ConnectionRefusedError: + if wait_time > 4: + _logger.warning( + 'Connection refused by runtime server.' + ' Retrying in %s seconds.', wait_time, + ) + if wait_time > 16 and attached: + _logger.warning( + 'Connection is still refused by runtime server.' + ' This may be due to the server not being started.' + ' You may want to check the server logs, by starting' + ' the compiler with "runtime_log_level" set. You' + ' can also try launching the bqskit runtime in' + ' detached mode. See the bqskit runtime documentation' + ' for more information:' + ' https://bqskit.readthedocs.io/en/latest/guides/' + 'distributing.html', + ) time.sleep(wait_time) wait_time *= 2 + current_retry += 1 else: self.conn = conn handle = functools.partial(sigint_handler, compiler=self) From a1b106b738f6b7aab899cb3f3add1f4442c91936 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 08:47:20 -0400 Subject: [PATCH 022/197] Fixes #181 --- bqskit/runtime/base.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index f4897b5ec..52fed8556 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -487,10 +487,6 @@ def handle_disconnect(self, conn: Connection) -> None: if conn in self.conn_to_employee_dict: self.handle_shutdown() - def __del__(self) -> None: - """Ensure resources are cleaned up.""" - self.handle_shutdown() - def assign_tasks( self, tasks: Sequence[RuntimeTask], From 1004c05de4e8b53fdce35dcb33261954fba396bb Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Apr 2024 10:30:59 -0400 Subject: [PATCH 023/197] Attempt to Fix #213 --- bqskit/compiler/compiler.py | 18 +++++++++++++----- tests/runtime/test_attached.py | 20 ++++++++++---------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 00f2b54ab..019a8fc10 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -103,7 +103,7 @@ def __init__( self.p: Popen | None = None # type: ignore self.conn: Connection | None = None - atexit.register(self.close) + _compiler_instances.add(self) if ip is None: ip = 'localhost' @@ -240,11 +240,10 @@ def close(self) -> None: # Reset interrupt signal handler and remove exit handler if hasattr(self, 'old_signal'): signal.signal(signal.SIGINT, self.old_signal) + del self.old_signal - def __del__(self) -> None: - self.close() - atexit.unregister(self.close) - _logger.debug('Compiler successfully shutdown.') + _compiler_instances.discard(self) + _logger.debug('Compiler has been closed.') def submit( self, @@ -484,3 +483,12 @@ def sigint_handler(signum: int, frame: FrameType, compiler: Compiler) -> None: _logger.critical('Compiler interrupted.') compiler.close() raise KeyboardInterrupt + + +_compiler_instances: set[Compiler] = set() + + +@atexit.register +def 
_cleanup_compiler_instances() -> None: + for compiler in list(_compiler_instances): + compiler.close() diff --git a/tests/runtime/test_attached.py b/tests/runtime/test_attached.py index db70ec8bc..aa155dc3c 100644 --- a/tests/runtime/test_attached.py +++ b/tests/runtime/test_attached.py @@ -17,16 +17,16 @@ from bqskit.runtime import get_runtime -@pytest.mark.parametrize('num_workers', [1, -1]) -def test_startup_shutdown_transparently(num_workers: int) -> None: - in_num_childs = len(psutil.Process(os.getpid()).children(recursive=True)) - compiler = Compiler(num_workers=num_workers) - assert compiler.p is not None - compiler.__del__() - if sys.platform == 'win32': - time.sleep(1) - out_num_childs = len(psutil.Process(os.getpid()).children(recursive=True)) - assert in_num_childs == out_num_childs +# @pytest.mark.parametrize('num_workers', [1, -1]) +# def test_startup_shutdown_transparently(num_workers: int) -> None: +# in_num_childs = len(psutil.Process(os.getpid()).children(recursive=True)) +# compiler = Compiler(num_workers=num_workers) +# assert compiler.p is not None +# compiler.__del__() +# if sys.platform == 'win32': +# time.sleep(1) +# out_num_childs = len(psutil.Process(os.getpid()).children(recursive=True)) +# assert in_num_childs == out_num_childs @pytest.mark.parametrize('num_workers', [1, -1]) From 20859d022256a4e71184e186ecc768d9ca8a8123 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 4 Apr 2024 08:01:47 -0400 Subject: [PATCH 024/197] More robust startup and a little cleanup --- bqskit/compiler/compiler.py | 4 +- bqskit/runtime/attached.py | 6 +- bqskit/runtime/base.py | 63 ++++++++------ bqskit/runtime/detached.py | 61 +++++++------- bqskit/runtime/manager.py | 10 +-- bqskit/runtime/message.py | 1 + bqskit/runtime/worker.py | 161 ++++++++++++------------------------ 7 files changed, 129 insertions(+), 177 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 019a8fc10..ba19fbd16 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -168,7 +168,9 @@ def _connect_to_server(self, ip: str, port: int, attached: bool) -> None: self.old_signal = signal.signal(signal.SIGINT, handle) if self.conn is None: raise RuntimeError('Connection unexpectedly none.') - self.conn.send((RuntimeMessage.CONNECT, sys.path)) + msg, payload = self._send_recv(RuntimeMessage.CONNECT, sys.path) + if msg != RuntimeMessage.READY: + raise RuntimeError(f'Unexpected message type: {msg}.') _logger.debug('Successfully connected to runtime server.') return raise RuntimeError('Client connection refused') diff --git a/bqskit/runtime/attached.py b/bqskit/runtime/attached.py index 6cd6a6a98..27a6f7983 100644 --- a/bqskit/runtime/attached.py +++ b/bqskit/runtime/attached.py @@ -59,9 +59,6 @@ def __init__( self.mailboxes: dict[int, ServerMailbox] = {} self.mailbox_counter = 0 - # Start workers - self.spawn_workers(num_workers, worker_port) - # Connect to client client_conn = self.listen_once('localhost', port) self.clients[client_conn] = set() @@ -72,6 +69,9 @@ def __init__( ) self.logger.info('Connected to client.') + # Start workers + self.spawn_workers(num_workers, worker_port, log_level) + def handle_disconnect(self, conn: Connection) -> None: """A client disconnect in attached mode is equal to a shutdown.""" self.handle_shutdown() diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 52fed8556..206a66fc2 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -97,29 +97,6 @@ def get_num_of_tasks_sent_since( raise 
RuntimeError('Read receipt not found in submit cache.') -def send_outgoing(node: ServerBase) -> None: - """Outgoing thread forwards messages as they are created.""" - while True: - outgoing = node.outgoing.get() - - if not node.running: - # NodeBase's handle_shutdown will put a dummy value in the - # queue to wake the thread up so it can exit safely. - # Hence the node.running check now rather than in the - # while condition. - break - - outgoing[0].send((outgoing[1], outgoing[2])) - node.logger.debug(f'Sent message {outgoing[1].name}.') - - if outgoing[1] == RuntimeMessage.SUBMIT_BATCH: - node.logger.log(1, f'{len(outgoing[2])}\n') - else: - node.logger.log(1, f'{outgoing[2]}\n') - - node.outgoing.task_done() - - def sigint_handler(signum: int, _: FrameType | None, node: ServerBase) -> None: """Interrupt the node.""" if not node.running: @@ -172,7 +149,7 @@ def __init__(self) -> None: # Start outgoing thread self.outgoing: Queue[tuple[Connection, RuntimeMessage, Any]] = Queue() - self.outgoing_thread = Thread(target=send_outgoing, args=(self,)) + self.outgoing_thread = Thread(target=self.send_outgoing, daemon=True) self.outgoing_thread.start() self.logger.info('Started outgoing thread.') @@ -376,9 +353,34 @@ def listen_once(self, ip: str, port: int) -> Connection: listener.close() return conn + def send_outgoing(self) -> None: + """Outgoing thread forwards messages as they are created.""" + while True: + outgoing = self.outgoing.get() + + if not self.running: + # NodeBase's handle_shutdown will put a dummy value in the + # queue to wake the thread up so it can exit safely. + # Hence the node.running check now rather than in the + # while condition. + break + + if outgoing[0].closed: + continue + + outgoing[0].send((outgoing[1], outgoing[2])) + _logger.debug(f'Sent message {outgoing[1].name}.') + + if outgoing[1] == RuntimeMessage.SUBMIT_BATCH: + _logger.log(1, f'[{outgoing[2][0]}] * {len(outgoing[2])}\n') + else: + _logger.log(1, f'{outgoing[2]}\n') + + self.outgoing.task_done() + def run(self) -> None: """Main loop.""" - self.logger.info(f'{self.__class__.__name__} running...') + _logger.info(f'{self.__class__.__name__} running...') try: while self.running: @@ -592,10 +594,17 @@ def get_employee_responsible_for(self, worker_id: int) -> RuntimeEmployee: employee_id = (worker_id - self.lower_id_bound) // self.step_size return self.employees[employee_id] - def broadcast_cancel(self, addr: RuntimeAddress) -> None: + def broadcast(self, msg: RuntimeMessage, payload: Any) -> None: """Broadcast a cancel message to my employees.""" for employee in self.employees: - self.outgoing.put((employee.conn, RuntimeMessage.CANCEL, addr)) + self.outgoing.put((employee.conn, msg, payload)) + + def handle_importpath(self, paths: list[str]) -> None: + """Update the system path with the given paths.""" + for path in paths: + if path not in sys.path: + sys.path.append(path) + self.broadcast(RuntimeMessage.IMPORTPATH, paths) def handle_waiting( self, diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 607af21ed..86d69f322 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -1,6 +1,7 @@ """This module implements the DetachedServer runtime.""" from __future__ import annotations +import sys import argparse import logging import selectors @@ -32,25 +33,6 @@ from bqskit.runtime.task import RuntimeTask -def listen(server: DetachedServer, port: int) -> None: - """Listening thread listens for client connections.""" - listener = Listener(('0.0.0.0', port)) - while 
server.running: - client = listener.accept() - - if server.running: - # We check again that the server is running before registering - # the client because dummy data is sent to unblock - # listener.accept() during server shutdown - server.clients[client] = set() - server.sel.register( - client, - selectors.EVENT_READ, - MessageDirection.CLIENT, - ) - server.logger.debug('Connected and registered new client.') - - listener.close() @dataclass @@ -117,9 +99,29 @@ def __init__( # Start client listener self.port = port - self.listen_thread = Thread(target=listen, args=(self, port)) + self.listen_thread = Thread(target=self.listen, args=(port,)) + self.listen_thread.daemon = True self.listen_thread.start() self.logger.info(f'Started client listener on port {self.port}.') + def listen(self, port: int) -> None: + """Listening thread listens for client connections.""" + listener = Listener(('0.0.0.0', port)) + while self.running: + client = listener.accept() + + if self.running: + # We check again that the server is running before registering + # the client because dummy data is sent to unblock + # listener.accept() during server shutdown + self.clients[client] = set() + self.sel.register( + client, + selectors.EVENT_READ, + MessageDirection.CLIENT, + ) + _logger.debug('Connected and registered new client.') + + listener.close() def handle_message( self, @@ -132,16 +134,8 @@ def handle_message( if direction == MessageDirection.CLIENT: if msg == RuntimeMessage.CONNECT: - # paths, serialized_defintions = cast(List[str], payload) paths = cast(List[str], payload) - import sys - for path in paths: - if path not in sys.path: - sys.path.append(path) - for employee in self.employees: - employee.conn.send( - (RuntimeMessage.IMPORTPATH, path), - ) + self.handle_connect(conn, paths) elif msg == RuntimeMessage.DISCONNECT: self.handle_disconnect(conn) @@ -187,7 +181,7 @@ def handle_message( self.handle_log(payload) elif msg == RuntimeMessage.CANCEL: - self.broadcast_cancel(payload) + self.broadcast(msg, payload) elif msg == RuntimeMessage.SHUTDOWN: self.handle_shutdown() @@ -207,6 +201,11 @@ def handle_message( else: raise RuntimeError(f'Unexpected message from {direction.name}.') + def handle_connect(self, conn: Connection, paths: list[str]) -> None: + """Handle a client connection request.""" + self.handle_importpath(paths) + self.outgoing.put((conn, RuntimeMessage.READY, None)) + def handle_system_error(self, error_str: str) -> None: """ Handle an error in runtime code as opposed to client code. 
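
The handle_connect/READY pair closes a small race left by the earlier patch: the client now blocks until the server confirms it has absorbed the client's sys.path, rather than assuming the CONNECT message was already processed. A toy version of the same handshake over multiprocessing.connection; the ephemeral port and the string message tags are placeholders, not the runtime's actual wire format:

from __future__ import annotations

import sys
import threading
from multiprocessing.connection import Client, Listener

listener = Listener(('localhost', 0))   # bind an ephemeral local port
address = listener.address              # what the client will dial


def toy_server() -> None:
    with listener.accept() as conn:
        msg, paths = conn.recv()                 # expect ('CONNECT', [...])
        for path in paths:                       # absorb paths first...
            if path not in sys.path:
                sys.path.append(path)
        conn.send(('READY', None))               # ...then acknowledge


server = threading.Thread(target=toy_server, daemon=True)
server.start()

with Client(address) as conn:
    conn.send(('CONNECT', list(sys.path)))
    msg, _ = conn.recv()
    if msg != 'READY':
        raise RuntimeError(f'Unexpected handshake reply: {msg}')

server.join()
listener.close()
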
@@ -331,7 +330,7 @@ def handle_cancel_comp_task(self, request: uuid.UUID) -> None: # Forward internal cancel messages addr = RuntimeAddress(-1, mailbox_id, 0) - self.broadcast_cancel(addr) + self.broadcast(RuntimeMessage.CANCEL, addr) # Acknowledge the client's cancel request if not client_conn.closed: diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index c8725bf1c..72fdabf25 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -146,18 +146,14 @@ def handle_message( self.send_result_down(result) elif msg == RuntimeMessage.CANCEL: - addr = cast(RuntimeAddress, payload) - self.broadcast_cancel(addr) + self.broadcast(RuntimeMessage.CANCEL, payload) elif msg == RuntimeMessage.SHUTDOWN: self.handle_shutdown() elif msg == RuntimeMessage.IMPORTPATH: - import_path = cast(str, payload) - import sys - sys.path.append(import_path) - for employee in self.employees: - employee.conn.send((RuntimeMessage.IMPORTPATH, import_path)) + paths = cast(List[str], payload) + self.handle_importpath(paths) else: raise RuntimeError(f'Unexpected message type: {msg.name}') diff --git a/bqskit/runtime/message.py b/bqskit/runtime/message.py index aed9f6cfa..c975099c8 100644 --- a/bqskit/runtime/message.py +++ b/bqskit/runtime/message.py @@ -21,3 +21,4 @@ class RuntimeMessage(IntEnum): WAITING = 12 UPDATE = 13 IMPORTPATH = 14 + READY = 15 diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 7c4c8434d..8c96f0d6a 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -30,31 +30,6 @@ from bqskit.runtime.task import RuntimeTask -class WorkerQueue(): - """The worker's task FIFO queue.""" - - def __init__(self) -> None: - """ - Initialize the worker queue. - - An OrderedDict is used to internally store the task. This prevents the - same task appearing multiple times in the queue, while also ensuring - O(1) operations. 
- """ - self._queue: OrderedDict[RuntimeAddress, None] = OrderedDict() - - def put(self, addr: RuntimeAddress) -> None: - """Enqueue a task by its address.""" - if addr not in self._queue: - self._queue[addr] = None - - def get(self) -> RuntimeAddress: - """Get the next task to run.""" - return self._queue.popitem(last=False)[0] - - def empty(self) -> bool: - """Check if the queue is empty.""" - return len(self._queue) == 0 @dataclass @@ -125,59 +100,6 @@ def deposit_result(self, result: RuntimeResult) -> None: self.result[slot_id] = result.result -def handle_incoming_comms(worker: Worker) -> None: - """Handle all incoming messages.""" - while True: - # Handle incomming communication - try: - msg, payload = worker._conn.recv() - except Exception: - print(f'Worker {worker._id} crashed due to lost connection') - worker._running = False - worker._ready_task_ids.put(RuntimeAddress(-1, -1, -1)) - break - - # Process message - if msg == RuntimeMessage.SHUTDOWN: - print(f'Worker {worker._id} received shutdown message') - worker._running = False - worker._ready_task_ids.put(RuntimeAddress(-1, -1, -1)) - # TODO: Interupt main, maybe even kill it - break - - elif msg == RuntimeMessage.SUBMIT: - # print('Worker received submit message') - worker.read_receipt_mutex.acquire() - task = cast(RuntimeTask, payload) - worker.most_recent_read_submit = task.unique_id - worker._add_task(task) - worker.read_receipt_mutex.release() - - elif msg == RuntimeMessage.SUBMIT_BATCH: - worker.read_receipt_mutex.acquire() - tasks = cast(List[RuntimeTask], payload) - worker.most_recent_read_submit = tasks[0].unique_id - worker._add_task(tasks.pop()) # Submit one task - worker._delayed_tasks.extend(tasks) # Delay rest - # Delayed tasks have no context and are stored (more-or-less) - # as a function pointer together with the arguments. - # When it gets started, it consumes much more memory, - # so we delay the task start until necessary (at no cost) - worker.read_receipt_mutex.release() - - elif msg == RuntimeMessage.RESULT: - result = cast(RuntimeResult, payload) - worker._handle_result(result) - - elif msg == RuntimeMessage.CANCEL: - addr = cast(RuntimeAddress, payload) - worker._handle_cancel(addr) - # TODO: preempt? 
- - elif msg == RuntimeMessage.IMPORTPATH: - import_path = cast(str, payload) - sys.path.append(import_path) - class Worker: """ @@ -237,17 +159,12 @@ def __init__(self, id: int, conn: Connection) -> None: self._id = id self._conn = conn - # self._outgoing: list[tuple[RuntimeMessage, Any]] = [] - # self._outgoing: Queue[tuple[RuntimeMessage, Any]] = Queue() - # """Stores outgoing messages to be handled by the event loop.""" - self._tasks: dict[RuntimeAddress, RuntimeTask] = {} """Tracks all started, unfinished tasks on this worker.""" self._delayed_tasks: list[RuntimeTask] = [] """Store all delayed tasks in LIFO order.""" - # self._ready_task_ids: WorkerQueue = WorkerQueue() self._ready_task_ids: Queue[RuntimeAddress] = Queue() """Tasks queued up for execution.""" @@ -257,7 +174,7 @@ def __init__(self, id: int, conn: Connection) -> None: self._active_task: RuntimeTask | None = None """The currently executing task if one is running.""" - self._running = False + self._running = True """Controls if the event loop is running.""" self._mailboxes: dict[int, WorkerMailbox] = {} @@ -291,20 +208,15 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: logging.setLogRecordFactory(record_factory) # Start incoming thread - self.incomming_thread = Thread( - target=handle_incoming_comms, - args=(self,), - ) + self.incomming_thread = Thread(target=self.recv_incoming) self.incomming_thread.daemon = True self.incomming_thread.start() - # self.logger.info('Started incoming thread.') # Communicate that this worker is ready self._conn.send((RuntimeMessage.STARTED, self._id)) def _loop(self) -> None: """Main worker event loop.""" - self._running = True while self._running: try: self._try_step_next_ready_task() @@ -316,24 +228,57 @@ def _loop(self) -> None: self._conn.send((RuntimeMessage.ERROR, error_str)) except Exception: pass - # self._try_idle() - # self._handle_comms() - - # def _try_idle(self) -> None: - # """If there is nothing to do, wait until we receive a message.""" - # empty_outgoing = len(self._outgoing) == 0 - # no_ready_tasks = self._ready_task_ids.empty() - # no_delayed_tasks = len(self._delayed_tasks) == 0 - - # if empty_outgoing and no_ready_tasks and no_delayed_tasks: - # self._conn.send((RuntimeMessage.WAITING, 1)) - # wait([self._conn]) - - # def _flush_outgoing_comms(self) -> None: - # """Handle all outgoing messages.""" - # for out_msg in self._outgoing: - # self._conn.send(out_msg) - # self._outgoing.clear() + + def recv_incoming(self) -> None: + """Continuously receive all incoming messages.""" + while self._running: + # Receive message + try: + msg, payload = self._conn.recv() + except Exception: + _logger.debug('Crashed due to lost connection') + os.kill(os.getpid(), signal.SIGKILL) + + _logger.debug(f'Received message {msg.name}.') + _logger.log(1, f'Payload: {payload}') + + # Process message + if msg == RuntimeMessage.SHUTDOWN: + os.kill(os.getpid(), signal.SIGKILL) + + elif msg == RuntimeMessage.SUBMIT: + self.read_receipt_mutex.acquire() + task = cast(RuntimeTask, payload) + self.most_recent_read_submit = task.unique_id + self._add_task(task) + self.read_receipt_mutex.release() + + elif msg == RuntimeMessage.SUBMIT_BATCH: + self.read_receipt_mutex.acquire() + tasks = cast(List[RuntimeTask], payload) + self.most_recent_read_submit = tasks[0].unique_id + self._add_task(tasks.pop()) # Submit one task + self._delayed_tasks.extend(tasks) # Delay rest + # Delayed tasks have no context and are stored (more-or-less) + # as a function pointer together with the arguments. 
+ # When it gets started, it consumes much more memory, + # so we delay the task start until necessary (at no cost) + self.read_receipt_mutex.release() + + elif msg == RuntimeMessage.RESULT: + result = cast(RuntimeResult, payload) + self._handle_result(result) + + elif msg == RuntimeMessage.CANCEL: + addr = cast(RuntimeAddress, payload) + self._handle_cancel(addr) + # TODO: preempt? + + elif msg == RuntimeMessage.IMPORTPATH: + paths = cast(List[str], payload) + for path in paths: + if path not in sys.path: + sys.path.append(path) def _add_task(self, task: RuntimeTask) -> None: """Start a task and add it to the loop.""" From f9edea39396ea7b0a8eaf8c848361092a1ef2dec Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 4 Apr 2024 08:02:22 -0400 Subject: [PATCH 025/197] Logging overhaul start --- bqskit/compiler/compiler.py | 2 +- bqskit/runtime/attached.py | 28 +++++++++++-------- bqskit/runtime/base.py | 55 +++++++++++++++++++----------------- bqskit/runtime/detached.py | 33 ++++++++++++++-------- bqskit/runtime/manager.py | 18 +++++++++--- bqskit/runtime/task.py | 4 ++- bqskit/runtime/worker.py | 56 +++++++++++++++++++++++++++---------- 7 files changed, 128 insertions(+), 68 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index ba19fbd16..ec03d5019 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -122,7 +122,7 @@ def _start_server( See :obj:`~bqskit.runtime.attached.AttachedServer` for more info. """ - params = f'{num_workers}, {runtime_log_level}, {worker_port=}' + params = f'{num_workers}, log_level={runtime_log_level}, {worker_port=}' import_str = 'from bqskit.runtime.attached import start_attached_server' launch_str = f'{import_str}; start_attached_server({params})' if sys.platform == 'win32': diff --git a/bqskit/runtime/attached.py b/bqskit/runtime/attached.py index 27a6f7983..a1067d0fa 100644 --- a/bqskit/runtime/attached.py +++ b/bqskit/runtime/attached.py @@ -15,6 +15,9 @@ from bqskit.runtime.direction import MessageDirection +_logger = logging.getLogger(__name__) + + class AttachedServer(DetachedServer): """ BQSKit Runtime Server in attached mode. @@ -33,6 +36,7 @@ def __init__( num_workers: int = -1, port: int = default_server_port, worker_port: int = default_worker_port, + log_level: int = logging.WARNING, ) -> None: """ Create a server with `num_workers` workers. @@ -50,6 +54,17 @@ def __init__( on. Default can be found in the :obj:`~bqskit.runtime.default_worker_port` global variable. 
""" + # Initialize runtime logging + logging.getLogger().setLevel(log_level) + _handler = logging.StreamHandler() + _handler.setLevel(0) + _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' + _fmt_message = ' %(module)s: %(message)s' + _fmt = _fmt_header + _fmt_message + _formatter = logging.Formatter(_fmt, '%H:%M:%S') + _handler.setFormatter(_formatter) + logging.getLogger().addHandler(_handler) + ServerBase.__init__(self) # See DetachedServer for more info on the following fields: @@ -67,7 +82,7 @@ def __init__( selectors.EVENT_READ, MessageDirection.CLIENT, ) - self.logger.info('Connected to client.') + _logger.info('Connected to client.') # Start workers self.spawn_workers(num_workers, worker_port, log_level) @@ -77,17 +92,8 @@ def handle_disconnect(self, conn: Connection) -> None: self.handle_shutdown() -def start_attached_server( - num_workers: int, - log_level: int, - **kwargs: Any, -) -> None: +def start_attached_server(num_workers: int, **kwargs: Any) -> None: """Start a runtime server in attached mode.""" - # Initialize runtime logging - _logger = logging.getLogger('bqskit-runtime') - _logger.setLevel(log_level) - _logger.addHandler(logging.StreamHandler()) - # Initialize the server server = AttachedServer(num_workers, **kwargs) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 206a66fc2..020a17fa5 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -33,6 +33,9 @@ from bqskit.runtime.worker import start_worker +_logger = logging.getLogger(__name__) + + class RuntimeEmployee: """Data structure for a boss's view of an employee.""" @@ -104,7 +107,7 @@ def sigint_handler(signum: int, _: FrameType | None, node: ServerBase) -> None: node.running = False node.terminate_hotline.send(b'\0') - node.logger.info('Server interrupted.') + _logger.info('Server interrupted.') class ServerBase: @@ -134,9 +137,6 @@ def __init__(self) -> None: self.sel.register(p, selectors.EVENT_READ, MessageDirection.SIGNAL) """Terminate hotline is used to unblock select while running.""" - self.logger = logging.getLogger('bqskit-runtime') - """Logger used to print operational log messages.""" - self.employees: list[RuntimeEmployee] = [] """Tracks this node's employees, which are managers or workers.""" @@ -151,7 +151,7 @@ def __init__(self) -> None: self.outgoing: Queue[tuple[Connection, RuntimeMessage, Any]] = Queue() self.outgoing_thread = Thread(target=self.send_outgoing, daemon=True) self.outgoing_thread.start() - self.logger.info('Started outgoing thread.') + _logger.info('Started outgoing thread.') def connect_to_managers(self, ipports: Sequence[tuple[str, int]]) -> None: """Connect to all managers given by endpoints in `ipports`.""" @@ -167,8 +167,8 @@ def connect_to_managers(self, ipports: Sequence[tuple[str, int]]) -> None: self.upper_id_bound, ) manager_conns.append(self.connect_to_manager(ip, port, lb, ub)) - self.logger.info(f'Connected to manager {i} at {ip}:{port}.') - self.logger.debug(f'Gave bounds {lb=} and {ub=} to manager {i}.') + _logger.info(f'Connected to manager {i} at {ip}:{port}.') + _logger.debug(f'Gave bounds {lb=} and {ub=} to manager {i}.') # Wait for started messages from all managers and register them self.total_workers = 0 @@ -182,11 +182,11 @@ def connect_to_managers(self, ipports: Sequence[tuple[str, int]]) -> None: selectors.EVENT_READ, MessageDirection.BELOW, ) - self.logger.info(f'Registered manager {i} with {num_workers=}.') + _logger.info(f'Registered manager {i} with {num_workers=}.') self.total_workers += num_workers 
self.num_idle_workers = self.total_workers - self.logger.info(f'Node has {self.total_workers} total workers.') + _logger.info(f'Node has {self.total_workers} total workers.') def connect_to_manager( self, @@ -228,6 +228,7 @@ def spawn_workers( self, num_workers: int = -1, port: int = default_worker_port, + logging_level: int = logging.WARNING, ) -> None: """ Spawn worker processes. @@ -252,10 +253,14 @@ def spawn_workers( procs = {} for i in range(num_workers): w_id = self.lower_id_bound + i - procs[w_id] = Process(target=start_worker, args=(w_id, port)) + procs[w_id] = Process( + target=start_worker, + args=(w_id, port), + kwargs={'logging_level': logging_level} + ) procs[w_id].daemon = True procs[w_id].start() - self.logger.debug(f'Stated worker process {i}.') + _logger.debug(f'Stated worker process {i}.') # Listen for the worker connections family = 'AF_INET' if sys.platform == 'win32' else None @@ -283,12 +288,12 @@ def spawn_workers( selectors.EVENT_READ, MessageDirection.BELOW, ) - self.logger.info(f'Registered worker {i}.') + _logger.debug(f'Registered worker {i}.') self.step_size = 1 self.total_workers = num_workers self.num_idle_workers = num_workers - self.logger.info(f'Node has spawned {num_workers} workers.') + _logger.info(f'Node has spawned {num_workers} workers.') def connect_to_workers( self, @@ -311,7 +316,7 @@ def connect_to_workers( oscount = os.cpu_count() num_workers = oscount if oscount else 1 - self.logger.info(f'Expecting {num_workers} worker connections.') + _logger.info(f'Expecting {num_workers} worker connections.') if self.lower_id_bound + num_workers >= self.upper_id_bound: raise RuntimeError('Insufficient id range for workers.') @@ -338,12 +343,12 @@ def connect_to_workers( selectors.EVENT_READ, MessageDirection.BELOW, ) - self.logger.info(f'Registered worker {i}.') + _logger.info(f'Registered worker {i}.') self.step_size = 1 self.total_workers = num_workers self.num_idle_workers = num_workers - self.logger.info(f'Node has connected to {num_workers} workers.') + _logger.info(f'Node has connected to {num_workers} workers.') def listen_once(self, ip: str, port: int) -> Connection: """Listen on `ip`:`port` for a connection and return on first one.""" @@ -394,7 +399,7 @@ def run(self) -> None: # If interrupted by signal, shutdown and exit if direction == MessageDirection.SIGNAL: - self.logger.debug('Received interrupt signal.') + _logger.debug('Received interrupt signal.') self.handle_shutdown() return @@ -405,11 +410,11 @@ def run(self) -> None: self.handle_disconnect(conn) continue log = f'Received message {msg.name} from {direction.name}.' 
- self.logger.debug(log) + _logger.debug(log) if msg == RuntimeMessage.SUBMIT_BATCH: - self.logger.log(1, f'{len(payload)}\n') + _logger.log(1, f'[{payload[0]}] * {len(payload)}\n') else: - self.logger.log(1, f'{payload}\n') + _logger.log(1, f'{payload}\n') # Handle message self.handle_message(msg, direction, conn, payload) @@ -417,7 +422,7 @@ def run(self) -> None: except Exception: exc_info = sys.exc_info() error_str = ''.join(traceback.format_exception(*exc_info)) - self.logger.error(error_str) + _logger.error(error_str) self.handle_system_error(error_str) finally: @@ -456,7 +461,7 @@ def handle_system_error(self, error_str: str) -> None: def handle_shutdown(self) -> None: """Shutdown the node and release resources.""" # Stop running - self.logger.info('Shutting down node.') + _logger.info('Shutting down node.') self.running = False # Instruct employees to shutdown @@ -467,17 +472,17 @@ def handle_shutdown(self) -> None: employee.complete_shutdown() self.employees.clear() - self.logger.debug('Shutdown employees.') + _logger.debug('Shutdown employees.') # Close selector self.sel.close() - self.logger.debug('Cleared selector.') + _logger.debug('Cleared selector.') # Close outgoing thread if self.outgoing_thread.is_alive(): self.outgoing.put(b'\0') # type: ignore self.outgoing_thread.join() - self.logger.debug('Joined outgoing thread.') + _logger.debug('Joined outgoing thread.') assert not self.outgoing_thread.is_alive() def handle_disconnect(self, conn: Connection) -> None: diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 86d69f322..1c447eda4 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -33,6 +33,7 @@ from bqskit.runtime.task import RuntimeTask +_logger = logging.getLogger(__name__) @dataclass @@ -102,7 +103,8 @@ def __init__( self.listen_thread = Thread(target=self.listen, args=(port,)) self.listen_thread.daemon = True self.listen_thread.start() - self.logger.info(f'Started client listener on port {self.port}.') + _logger.info(f'Started client listener on port {self.port}.') + def listen(self, port: int) -> None: """Listening thread listens for client connections.""" listener = Listener(('0.0.0.0', port)) @@ -230,7 +232,7 @@ def handle_shutdown(self) -> None: except Exception: pass self.clients.clear() - self.logger.debug('Cleared clients.') + _logger.debug('Cleared clients.') # Close listener (hasattr checked for attachedserver shutdown) if hasattr(self, 'listen_thread') and self.listen_thread.is_alive(): @@ -242,7 +244,7 @@ def handle_shutdown(self) -> None: dummy_socket.connect(('localhost', self.port)) dummy_socket.close() self.listen_thread.join() - self.logger.debug('Joined listening thread.') + _logger.debug('Joined listening thread.') def handle_disconnect(self, conn: Connection) -> None: """Disconnect a client connection from the runtime.""" @@ -250,7 +252,7 @@ def handle_disconnect(self, conn: Connection) -> None: tasks = self.clients.pop(conn) for task_id in tasks: self.handle_cancel_comp_task(task_id) - self.logger.info('Unregistered client.') + _logger.info('Unregistered client.') def handle_new_comp_task( self, @@ -262,7 +264,7 @@ def handle_new_comp_task( self.tasks[task.task_id] = (mailbox_id, conn) self.mailbox_to_task_dict[mailbox_id] = task.task_id self.mailboxes[mailbox_id] = ServerMailbox() - self.logger.info(f'New CompilationTask: {task.task_id}.') + _logger.info(f'New CompilationTask: {task.task_id}.') self.clients[conn].add(task.task_id) @@ -290,7 +292,7 @@ def handle_request(self, conn: Connection, request: 
uuid.UUID) -> None: if box.ready: # If the result has already arrived, ship it to the client. - self.logger.info(f'Responding to request for task {request}.') + _logger.info(f'Responding to request for task {request}.') self.outgoing.put((conn, RuntimeMessage.RESULT, box.result)) self.mailboxes.pop(mailbox_id) self.clients[conn].remove(request) @@ -320,7 +322,7 @@ def handle_status(self, conn: Connection, request: uuid.UUID) -> None: def handle_cancel_comp_task(self, request: uuid.UUID) -> None: """Cancel a compilation task in the system.""" - self.logger.info(f'Cancelling: {request}.') + _logger.info(f'Cancelling: {request}.') # Remove task from server data mailbox_id, client_conn = self.tasks[request] @@ -351,10 +353,10 @@ def handle_result(self, result: RuntimeResult) -> None: box = self.mailboxes[mailbox_id] box.result = result.result t_id = self.mailbox_to_task_dict[mailbox_id] - self.logger.info(f'Finished: {t_id}.') + _logger.info(f'Finished: {t_id}.') if box.client_waiting: - self.logger.info(f'Responding to request for task {t_id}.') + _logger.info(f'Responding to request for task {t_id}.') m = (self.tasks[t_id][1], RuntimeMessage.RESULT, box.result) self.outgoing.put(m) self.clients[self.tasks[t_id][1]].remove(t_id) @@ -430,9 +432,16 @@ def start_server() -> None: ipports = parse_ipports(args.managers) # Set up logging - _logger = logging.getLogger('bqskit-runtime') - _logger.setLevel([30, 20, 10, 1][min(args.verbose, 3)]) - _logger.addHandler(logging.StreamHandler()) + log_level = [30, 20, 10, 1][min(args.verbose, 3)] + logging.getLogger().setLevel(log_level) + _handler = logging.StreamHandler() + _handler.setLevel(0) + _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' + _fmt_message = ' %(module)s: %(message)s' + _fmt = _fmt_header + _fmt_message + _formatter = logging.Formatter(_fmt, '%H:%M:%S') + _handler.setFormatter(_formatter) + logging.getLogger().addHandler(_handler) # Import tests package recursively if args.import_tests: diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 72fdabf25..6dcbd4f5c 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -25,6 +25,9 @@ from bqskit.runtime.task import RuntimeTask +_logger = logging.getLogger(__name__) + + class Manager(ServerBase): """ BQSKit Runtime Manager. 
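The same logging bootstrap (set the root logger level, attach a StreamHandler with a timestamped formatter) recurs in the detached server, the manager, and the attached server in this patch. A compact standalone sketch of that setup, standard library only:

import logging

def init_runtime_logging(level: int = logging.WARNING) -> None:
    # The root logger gates by level; the handler itself passes everything on.
    logging.getLogger().setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(0)
    fmt = '%(asctime)s.%(msecs)03d - %(levelname)-8s | %(module)s: %(message)s'
    handler.setFormatter(logging.Formatter(fmt, '%H:%M:%S'))
    logging.getLogger().addHandler(handler)

init_runtime_logging(logging.DEBUG)
logging.getLogger(__name__).debug('runtime logging initialized')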
@@ -117,7 +120,7 @@ def __init__( # Inform upstream we are starting msg = (self.upstream, RuntimeMessage.STARTED, self.total_workers) self.outgoing.put(msg) - self.logger.info('Sent start message upstream.') + _logger.info('Sent start message upstream.') def handle_message( self, @@ -318,9 +321,16 @@ def start_manager() -> None: ipports = None if args.managers is None else parse_ipports(args.managers) # Set up logging - _logger = logging.getLogger('bqskit-runtime') - _logger.setLevel([30, 20, 10, 1][min(args.verbose, 3)]) - _logger.addHandler(logging.StreamHandler()) + log_level = [30, 20, 10, 1][min(args.verbose, 3)] + logging.getLogger().setLevel(log_level) + _handler = logging.StreamHandler() + _handler.setLevel(0) + _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' + _fmt_message = ' %(module)s: %(message)s' + _fmt = _fmt_header + _fmt_message + _formatter = logging.Formatter(_fmt, '%H:%M:%S') + _handler.setFormatter(_formatter) + logging.getLogger().addHandler(_handler) # Import tests package recursively if args.import_tests: diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index ccffa3b9a..9ac612c6c 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -103,7 +103,9 @@ def step(self, send_val: Any = None) -> Any: self.max_logging_depth < 0 or len(self.breadcrumbs) <= self.max_logging_depth ): - logging.getLogger().setLevel(0) + logging.getLogger().setLevel(self.logging_level) + else: + logging.getLogger().setLevel(100) # Execute a task step to_return = self.coro.send(send_val) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 8c96f0d6a..c0de57a8e 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -30,6 +30,7 @@ from bqskit.runtime.task import RuntimeTask +_logger = logging.getLogger(__name__) @dataclass @@ -197,12 +198,13 @@ def __init__(self, id: int, conn: Connection) -> None: def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: record = old_factory(*args, **kwargs) - active_task = get_worker()._active_task - if active_task is not None: - lvl = active_task.logging_level - if lvl is None or lvl <= record.levelno: - tid = active_task.comp_task_id - self._conn.send((RuntimeMessage.LOG, (tid, record))) + active_task = self._active_task + if not record.name.startswith('bqskit.runtime'): + if active_task is not None: + lvl = active_task.logging_level + if lvl is None or lvl <= record.levelno: + tid = active_task.comp_task_id + self._conn.send((RuntimeMessage.LOG, (tid, record))) return record logging.setLogRecordFactory(record_factory) @@ -211,6 +213,7 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: self.incomming_thread = Thread(target=self.recv_incoming) self.incomming_thread.daemon = True self.incomming_thread.start() + _logger.debug('Started incoming thread.') # Communicate that this worker is ready self._conn.send((RuntimeMessage.STARTED, self._id)) @@ -224,6 +227,7 @@ def _loop(self) -> None: self._running = False exc_info = sys.exc_info() error_str = ''.join(traceback.format_exception(*exc_info)) + _logger.error(error_str) try: self._conn.send((RuntimeMessage.ERROR, error_str)) except Exception: @@ -626,19 +630,21 @@ async def next(self, future: RuntimeFuture) -> list[tuple[int, Any]]: _worker = None -def start_worker(w_id: int | None, port: int, cpu: int | None = None) -> None: +def start_worker( + w_id: int | None, + port: int, + cpu: int | None = None, + logging_level: int = logging.WARNING, +) -> None: """Start this process's worker.""" if w_id is not None: # 
Ignore interrupt signals on workers, boss will handle it for us # If w_id is None, then we are being spawned separately. signal.signal(signal.SIGINT, signal.SIG_IGN) - # Purge all standard python logging configurations - for _, logger in logging.Logger.manager.loggerDict.items(): - if isinstance(logger, logging.PlaceHolder): - continue - logger.handlers.clear() - logging.Logger.manager.loggerDict = {} + # Enforce no default logging + # logging.lastResort.setLevel(100) + # logging.getLogger().handlers.clear() # Pin worker to cpu if cpu is not None: @@ -668,6 +674,19 @@ def start_worker(w_id: int | None, port: int, cpu: int | None = None) -> None: msg, w_id = conn.recv() assert msg == RuntimeMessage.STARTED + # Set up runtime logging + _runtime_logger = logging.getLogger('bqskit.runtime') + _runtime_logger.propagate = False + _runtime_logger.setLevel(logging_level) + _handler = logging.StreamHandler() + _handler.setLevel(0) + _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' + _fmt_message = ' [wid=%(wid)s]: %(message)s' + _fmt = _fmt_header + _fmt_message + _formatter = logging.Formatter(_fmt, '%H:%M:%S', defaults={'wid': w_id}) + _handler.setFormatter(_formatter) + _runtime_logger.addHandler(_handler) + # Build and start worker global _worker _worker = Worker(w_id, conn) @@ -716,6 +735,12 @@ def start_worker_rank() -> None: default=default_worker_port, help='The port the workers will try to connect to a manager on.', ) + parser.add_argument( + '-v', '--verbose', + action='count', + default=0, + help='Enable logging of increasing verbosity, either -v, -vv, or -vvv.', + ) args = parser.parse_args() if args.cpus is not None: @@ -735,10 +760,13 @@ def start_worker_rank() -> None: else: cpus = [None for _ in range(args.num_workers)] + logging_level = [30, 20, 10, 1][min(args.verbose, 3)] + # Spawn worker process procs = [] for cpu in cpus: - procs.append(Process(target=start_worker, args=(None, args.port, cpu))) + args = (None, args.port, cpu, logging_level) + procs.append(Process(target=start_worker, args=args)) procs[-1].start() # Join them From f549a4f9bd58e5036d661ba614f6c7c2a95db2c5 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 4 Apr 2024 08:07:14 -0400 Subject: [PATCH 026/197] pre-commit --- bqskit/runtime/base.py | 2 +- bqskit/runtime/detached.py | 1 - bqskit/runtime/task.py | 2 +- bqskit/runtime/worker.py | 6 ++---- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 020a17fa5..d98e7c672 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -256,7 +256,7 @@ def spawn_workers( procs[w_id] = Process( target=start_worker, args=(w_id, port), - kwargs={'logging_level': logging_level} + kwargs={'logging_level': logging_level}, ) procs[w_id].daemon = True procs[w_id].start() diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 1c447eda4..310072275 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -1,7 +1,6 @@ """This module implements the DetachedServer runtime.""" from __future__ import annotations -import sys import argparse import logging import selectors diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 9ac612c6c..962ca1ff6 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -53,7 +53,7 @@ def __init__( This doubles as a unique system-wide id for the task. 
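The record_factory hook installed in the worker above forwards user-task log records upstream while skipping the runtime's own loggers. A minimal standalone sketch of that interception pattern; forward() is a stand-in for sending a LOG message over the worker's connection, not the real call.

import logging

def install_forwarding_factory(forward) -> None:
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        # Only forward records that did not originate from the runtime itself.
        if not record.name.startswith('bqskit.runtime'):
            forward(record)
        return record

    logging.setLogRecordFactory(record_factory)

install_forwarding_factory(lambda r: print('forwarded:', r.getMessage()))
logging.getLogger('my.pass').warning('hello')             # forwarded
logging.getLogger('bqskit.runtime.worker').warning('hi')  # not forwarded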
""" - self.logging_level = logging_level + self.logging_level = logging_level or 0 """Logs with levels >= to this get emitted, if None always emit.""" self.comp_task_id = comp_task_id diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index c0de57a8e..fbfd1c5c2 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -8,7 +8,6 @@ import sys import time import traceback -from collections import OrderedDict from dataclasses import dataclass from multiprocessing import Process from multiprocessing.connection import Client @@ -101,7 +100,6 @@ def deposit_result(self, result: RuntimeResult) -> None: self.result[slot_id] = result.result - class Worker: """ BQSKit Runtime's Worker. @@ -765,8 +763,8 @@ def start_worker_rank() -> None: # Spawn worker process procs = [] for cpu in cpus: - args = (None, args.port, cpu, logging_level) - procs.append(Process(target=start_worker, args=args)) + pargs = (None, args.port, cpu, logging_level) + procs.append(Process(target=start_worker, args=pargs)) procs[-1].start() # Join them From c6441465892898d640271ecccac11222a77fba5a Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 4 Apr 2024 09:35:47 -0400 Subject: [PATCH 027/197] BLAS thread control in runtime --- bqskit/compiler/compiler.py | 17 +++++++++++++++-- bqskit/runtime/__init__.py | 26 ++++++++++++++++---------- bqskit/runtime/attached.py | 14 +++++++++++++- bqskit/runtime/base.py | 15 ++++++++++++++- bqskit/runtime/detached.py | 9 ++++----- bqskit/runtime/manager.py | 19 ++++++++++++++++++- bqskit/runtime/worker.py | 17 ++++++++++++++--- 7 files changed, 94 insertions(+), 23 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index ec03d5019..22fab9feb 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -71,6 +71,7 @@ def __init__( num_workers: int = -1, runtime_log_level: int = logging.WARNING, worker_port: int = default_worker_port, + num_blas_threads: int = 1, ) -> None: """ Construct a Compiler object. @@ -99,6 +100,9 @@ def __init__( worker_port (int): The optional port to pass to an attached runtime. See :obj:`~bqskit.runtime.attached.AttachedServer` for more info. + + num_blas_threads (int): The number of threads to use in the + BLAS libraries on the worker nodes. (Defaults to 1) """ self.p: Popen | None = None # type: ignore self.conn: Connection | None = None @@ -107,7 +111,12 @@ def __init__( if ip is None: ip = 'localhost' - self._start_server(num_workers, runtime_log_level, worker_port) + self._start_server( + num_workers, + runtime_log_level, + worker_port, + num_blas_threads, + ) self._connect_to_server(ip, port, self.p is not None) @@ -116,13 +125,17 @@ def _start_server( num_workers: int, runtime_log_level: int, worker_port: int, + num_blas_threads: int, ) -> None: """ Start an attached serer with `num_workers` workers. See :obj:`~bqskit.runtime.attached.AttachedServer` for more info. 
""" - params = f'{num_workers}, log_level={runtime_log_level}, {worker_port=}' + params = f'{num_workers}, ' + params += f'log_level={runtime_log_level}, ' + params += f'{worker_port=}, ' + params += f'{num_blas_threads=}, ' import_str = 'from bqskit.runtime.attached import start_attached_server' launch_str = f'{import_str}; start_attached_server({params})' if sys.platform == 'win32': diff --git a/bqskit/runtime/__init__.py b/bqskit/runtime/__init__.py index 0443671b6..2c01c4ddd 100644 --- a/bqskit/runtime/__init__.py +++ b/bqskit/runtime/__init__.py @@ -100,21 +100,27 @@ from typing import Protocol from typing import TYPE_CHECKING +if TYPE_CHECKING: + from bqskit.runtime.future import RuntimeFuture + + # Enable low-level fault handling: system crashes print a minimal trace. faulthandler.enable() +os.environ['RUST_BACKTRACE'] = '1' -# Disable multi-threading in BLAS libraries. -os.environ['OMP_NUM_THREADS'] = '1' -os.environ['OPENBLAS_NUM_THREADS'] = '1' -os.environ['MKL_NUM_THREADS'] = '1' -os.environ['NUMEXPR_NUM_THREADS'] = '1' -os.environ['VECLIB_MAXIMUM_THREADS'] = '1' -os.environ['RUST_BACKTRACE'] = '1' -print('SETTING THREADS TO 1') +# Control multi-threading in BLAS libraries. +def set_blas_thread_counts(i: int = 1) -> None: + """ + Control number of threads used by numpy and others. -if TYPE_CHECKING: - from bqskit.runtime.future import RuntimeFuture + Must be called before any numpy or other BLAS libraries are loaded. + """ + os.environ['OMP_NUM_THREADS'] = str(i) + os.environ['OPENBLAS_NUM_THREADS'] = str(i) + os.environ['MKL_NUM_THREADS'] = str(i) + os.environ['NUMEXPR_NUM_THREADS'] = str(i) + os.environ['VECLIB_MAXIMUM_THREADS'] = str(i) class RuntimeHandle(Protocol): diff --git a/bqskit/runtime/attached.py b/bqskit/runtime/attached.py index a1067d0fa..f68be0ee3 100644 --- a/bqskit/runtime/attached.py +++ b/bqskit/runtime/attached.py @@ -37,6 +37,7 @@ def __init__( port: int = default_server_port, worker_port: int = default_worker_port, log_level: int = logging.WARNING, + num_blas_threads: int = 1, ) -> None: """ Create a server with `num_workers` workers. @@ -53,6 +54,12 @@ def __init__( worker_port (int): The port this server will listen for workers on. Default can be found in the :obj:`~bqskit.runtime.default_worker_port` global variable. + + log_level (int): The logging level for the server and workers. + (Default: logging.WARNING). + + num_blas_threads (int): The number of threads to use in BLAS + libraries. (Default: 1). 
""" # Initialize runtime logging logging.getLogger().setLevel(log_level) @@ -85,7 +92,12 @@ def __init__( _logger.info('Connected to client.') # Start workers - self.spawn_workers(num_workers, worker_port, log_level) + self.spawn_workers( + num_workers, + worker_port, + log_level, + num_blas_threads, + ) def handle_disconnect(self, conn: Connection) -> None: """A client disconnect in attached mode is equal to a shutdown.""" diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index d98e7c672..1cb53c494 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -25,6 +25,7 @@ from bqskit.runtime import default_manager_port from bqskit.runtime import default_worker_port +from bqskit.runtime import set_blas_thread_counts from bqskit.runtime.address import RuntimeAddress from bqskit.runtime.direction import MessageDirection from bqskit.runtime.message import RuntimeMessage @@ -143,6 +144,9 @@ def __init__(self) -> None: self.conn_to_employee_dict: dict[Connection, RuntimeEmployee] = {} """Used to find the employee associated with a message.""" + # Servers do not need blas threads + set_blas_thread_counts(1) + # Safely and immediately exit on interrupt signals handle = functools.partial(sigint_handler, node=self) signal.signal(signal.SIGINT, handle) @@ -229,6 +233,7 @@ def spawn_workers( num_workers: int = -1, port: int = default_worker_port, logging_level: int = logging.WARNING, + num_blas_threads: int = 1, ) -> None: """ Spawn worker processes. @@ -241,6 +246,11 @@ def spawn_workers( port (int): The port this server will listen for workers on. Default can be found in the :obj:`~bqskit.runtime.default_worker_port` global variable. + + logging_level (int): The logging level for the workers. + + num_blas_threads (int): The number of threads to use in BLAS + libraries. (Default: 1). 
""" if num_workers == -1: oscount = os.cpu_count() @@ -256,7 +266,10 @@ def spawn_workers( procs[w_id] = Process( target=start_worker, args=(w_id, port), - kwargs={'logging_level': logging_level}, + kwargs={ + 'logging_level': logging_level, + 'num_blas_threads': num_blas_threads, + }, ) procs[w_id].daemon = True procs[w_id].start() diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 310072275..230a5949d 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -19,8 +19,6 @@ from typing import Sequence from typing import Tuple -from bqskit.compiler.status import CompilationStatus -from bqskit.compiler.task import CompilationTask from bqskit.runtime import default_server_port from bqskit.runtime.address import RuntimeAddress from bqskit.runtime.base import import_tests_package @@ -142,8 +140,7 @@ def handle_message( self.handle_disconnect(conn) elif msg == RuntimeMessage.SUBMIT: - ctask = cast(CompilationTask, payload) - self.handle_new_comp_task(conn, ctask) + self.handle_new_comp_task(conn, payload) elif msg == RuntimeMessage.REQUEST: request = cast(uuid.UUID, payload) @@ -256,9 +253,10 @@ def handle_disconnect(self, conn: Connection) -> None: def handle_new_comp_task( self, conn: Connection, - task: CompilationTask, + task: Any, # Explicitly not CompilationTask to avoid early import ) -> None: """Convert a :class:`CompilationTask` into an internal one.""" + from bqskit.compiler.task import CompilationTask mailbox_id = self._get_new_mailbox_id() self.tasks[task.task_id] = (mailbox_id, conn) self.mailbox_to_task_dict[mailbox_id] = task.task_id @@ -306,6 +304,7 @@ def handle_request(self, conn: Connection, request: uuid.UUID) -> None: def handle_status(self, conn: Connection, request: uuid.UUID) -> None: """Inform the client if the task is finished or not.""" + from bqskit.compiler.status import CompilationStatus if request not in self.clients[conn] or request not in self.tasks: # This task is unknown to the system m = (conn, RuntimeMessage.STATUS, CompilationStatus.UNKNOWN) diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 6dcbd4f5c..507cdf9a3 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -47,6 +47,8 @@ def __init__( ipports: list[tuple[str, int]] | None = None, worker_port: int = default_worker_port, only_connect: bool = False, + log_level: int = logging.WARNING, + num_blas_threads: int = 1, ) -> None: """ Create a manager instance in one of two ways: @@ -83,6 +85,16 @@ def __init__( only_connect (bool): If true, do not spawn workers, only connect to already spawned workers. + + log_level (int): The logging level for the manager and workers. + If `only_connect` is True, doesn't set worker's log level. + In that case, set the worker's log level when spawning them. + (Default: logging.WARNING). + + num_blas_threads (int): The number of threads to use in BLAS + libraries. If `only_connect` is True this is ignored. In + that case, set the thread count when spawning workers. + (Default: 1). 
""" super().__init__() @@ -105,7 +117,12 @@ def __init__( if only_connect: self.connect_to_workers(num_workers, worker_port) else: - self.spawn_workers(num_workers, worker_port) + self.spawn_workers( + num_workers, + worker_port, + log_level, + num_blas_threads, + ) # Case 2: Connect to detached managers at ipports else: diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index fbfd1c5c2..c034c2ce2 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -22,6 +22,7 @@ from typing import List from bqskit.runtime import default_worker_port +from bqskit.runtime import set_blas_thread_counts from bqskit.runtime.address import RuntimeAddress from bqskit.runtime.future import RuntimeFuture from bqskit.runtime.message import RuntimeMessage @@ -633,6 +634,7 @@ def start_worker( port: int, cpu: int | None = None, logging_level: int = logging.WARNING, + num_blas_threads: int = 1, ) -> None: """Start this process's worker.""" if w_id is not None: @@ -640,9 +642,12 @@ def start_worker( # If w_id is None, then we are being spawned separately. signal.signal(signal.SIGINT, signal.SIG_IGN) + # Set number of BLAS threads + set_blas_thread_counts(num_blas_threads) + # Enforce no default logging - # logging.lastResort.setLevel(100) - # logging.getLogger().handlers.clear() + logging.lastResort = logging.NullHandler() # type: ignore # TODO: should I report this as a type bug? # noqa: E501 + logging.getLogger().handlers.clear() # Pin worker to cpu if cpu is not None: @@ -739,6 +744,12 @@ def start_worker_rank() -> None: default=0, help='Enable logging of increasing verbosity, either -v, -vv, or -vvv.', ) + parser.add_argument( + '-t', '--num_blas_threads', + type=int, + default=1, + help='The number of threads to use in BLAS libraries.', + ) args = parser.parse_args() if args.cpus is not None: @@ -763,7 +774,7 @@ def start_worker_rank() -> None: # Spawn worker process procs = [] for cpu in cpus: - pargs = (None, args.port, cpu, logging_level) + pargs = (None, args.port, cpu, logging_level, args.num_blas_threads) procs.append(Process(target=start_worker, args=pargs)) procs[-1].start() From f71e63d8fc837748614de63e01e9b537c219bfc5 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 4 Apr 2024 21:46:24 -0400 Subject: [PATCH 028/197] Scheduling change --- bqskit/runtime/attached.py | 3 + bqskit/runtime/base.py | 115 +++++++++++++++++++++++++++++++++--- bqskit/runtime/manager.py | 3 + bqskit/runtime/task.py | 14 ++++- bqskit/runtime/worker.py | 66 +++++++++++---------- bqskit/utils/cachedclass.py | 3 +- 6 files changed, 162 insertions(+), 42 deletions(-) diff --git a/bqskit/runtime/attached.py b/bqskit/runtime/attached.py index f68be0ee3..1590614c6 100644 --- a/bqskit/runtime/attached.py +++ b/bqskit/runtime/attached.py @@ -99,6 +99,9 @@ def __init__( num_blas_threads, ) + self.schedule_tasks = self.schedule_for_workers # type: ignore + self.handle_waiting = self.handle_direct_worker_waiting # type: ignore + def handle_disconnect(self, conn: Connection) -> None: """A client disconnect in attached mode is equal to a shutdown.""" self.handle_shutdown() diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 1cb53c494..0d0f010eb 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -101,6 +101,49 @@ def get_num_of_tasks_sent_since( raise RuntimeError('Read receipt not found in submit cache.') +class MultiLevelQueue: + """A multi-level queue for delaying submitted tasks.""" + + def __init__(self) -> None: + """Initialize the multi-level queue.""" + self.queue: 
dict[int, list[RuntimeTask]] = {} + self.levels: int | None = None + + def delay(self, tasks: Sequence[RuntimeTask]) -> None: + """Update the multi-level queue with tasks.""" + for task in tasks: + task_depth = len(task.breadcrumbs) + + if task_depth not in self.queue: + if self.levels is None or task_depth > self.levels: + self.levels = task_depth + self.queue[task_depth] = [] + + self.queue[task_depth].append(task) + + def empty(self) -> bool: + """Return True if the multi-level queue is empty.""" + return self.levels is None + + def pop(self) -> RuntimeTask: + """Pop the next task from the multi-level queue.""" + if self.empty(): + raise RuntimeError('Cannot pop from an empty multi-level queue.') + + task = self.queue[self.levels].pop() # type: ignore # checked above + + while self.levels is not None: + if self.levels in self.queue: + if len(self.queue[self.levels]) != 0: + break + self.queue.pop(self.levels) + self.levels -= 1 + if self.levels < 0: + self.levels = None + + return task + + def sigint_handler(signum: int, _: FrameType | None, node: ServerBase) -> None: """Interrupt the node.""" if not node.running: @@ -144,6 +187,9 @@ def __init__(self) -> None: self.conn_to_employee_dict: dict[Connection, RuntimeEmployee] = {} """Used to find the employee associated with a message.""" + self.multi_level_queue = MultiLevelQueue() + """Used to delay tasks until they can be scheduled.""" + # Servers do not need blas threads set_blas_thread_counts(1) @@ -579,18 +625,47 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: reverse=True, ) for e, assignment in sorted_assignments: - num_tasks = len(assignment) + self.send_tasks_to_employee(e, assignment) - if num_tasks == 0: - continue + self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) - self.outgoing.put((e.conn, RuntimeMessage.SUBMIT_BATCH, assignment)) + def schedule_for_workers(self, tasks: Sequence[RuntimeTask]) -> None: + """Schedule tasks for workers, return the amount assigned.""" + if len(tasks) == 0: + return - e.num_tasks += num_tasks - e.num_idle_workers -= min(num_tasks, e.num_idle_workers) - e.submit_cache.append((assignment[0].unique_id, num_tasks)) + num_assigned_tasks = 0 + for e in self.employees: + # TODO: randomize? prioritize workers idle but with less tasks? + if num_assigned_tasks >= len(tasks): + break + + if e.num_idle_workers > 0: + task = tasks[num_assigned_tasks] + self.send_tasks_to_employee(e, [task]) + num_assigned_tasks += 1 self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) + self.multi_level_queue.delay(tasks[num_assigned_tasks:]) + + def send_tasks_to_employee( + self, + e: RuntimeEmployee, + tasks: Sequence[RuntimeTask], + ) -> None: + """Send the `task` to the employee responsible for `worker_id`.""" + num_tasks = len(tasks) + + if num_tasks == 0: + return + + e.num_tasks += num_tasks + e.num_idle_workers -= min(num_tasks, e.num_idle_workers) + e.submit_cache.append((tasks[0].unique_id, num_tasks)) + if num_tasks == 1: + self.outgoing.put((e.conn, RuntimeMessage.SUBMIT, tasks[0])) + else: + self.outgoing.put((e.conn, RuntimeMessage.SUBMIT_BATCH, tasks)) def send_result_down(self, result: RuntimeResult) -> None: """Send the `result` to the appropriate employee.""" @@ -641,14 +716,36 @@ def handle_waiting( idle count by the number of tasks sent since the read receipt. 
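The adjustment described in this docstring can be shown in isolation with a plain list standing in for the employee's submit cache: count every batch sent after the one named by the read receipt and subtract it from the reported idle count. An illustrative standalone sketch, not the runtime's bookkeeping itself:

def tasks_sent_since(submit_cache, read_receipt):
    # Count tasks in batches sent after the batch named by read_receipt.
    if read_receipt is None:
        return sum(n for _, n in submit_cache)
    total, seen = 0, False
    for batch_id, n in submit_cache:
        if seen:
            total += n
        elif batch_id == read_receipt:
            seen = True
    return total

cache = [('a', 3), ('b', 2), ('c', 4)]      # (first-task id, batch size) pairs
reported_idle = 5
unaccounted = tasks_sent_since(cache, 'b')  # worker had read up through batch 'b'
adjusted_idle = max(reported_idle - unaccounted, 0)
print(unaccounted, adjusted_idle)           # 4 1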
""" employee = self.conn_to_employee_dict[conn] - unaccounted_task = employee.get_num_of_tasks_sent_since(read_receipt) - adjusted_idle_count = max(new_idle_count - unaccounted_task, 0) + unaccounted_tasks = employee.get_num_of_tasks_sent_since(read_receipt) + adjusted_idle_count = max(new_idle_count - unaccounted_tasks, 0) old_count = employee.num_idle_workers employee.num_idle_workers = adjusted_idle_count self.num_idle_workers += (adjusted_idle_count - old_count) assert 0 <= self.num_idle_workers <= self.total_workers + def handle_direct_worker_waiting( + self, + conn: Connection, + new_idle_count: int, + read_receipt: RuntimeAddress | None, + ) -> None: + """ + Record that a worker is idle with nothing to do. + + Schedule tasks from the multi-level queue to the worker. + """ + ServerBase.handle_waiting(self, conn, new_idle_count, read_receipt) + + if self.multi_level_queue.empty(): + return + + employee = self.conn_to_employee_dict[conn] + if employee.num_idle_workers > 0: + task = self.multi_level_queue.pop() + self.send_tasks_to_employee(employee, [task]) + self.num_idle_workers -= 1 + def parse_ipports(ipports_str: Sequence[str]) -> list[tuple[str, int]]: """Parse command line ip and port inputs.""" diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 507cdf9a3..0283be6c4 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -124,6 +124,9 @@ def __init__( num_blas_threads, ) + self.schedule_tasks = self.schedule_for_workers # type: ignore + self.handle_waiting = self.handle_direct_worker_waiting # type: ignore # noqa: E501 + # Case 2: Connect to detached managers at ipports else: self.connect_to_managers(ipports) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 962ca1ff6..ca0feefa7 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -3,6 +3,7 @@ import inspect import logging +import pickle from typing import Any from typing import Coroutine @@ -41,7 +42,13 @@ def __init__( RuntimeTask.task_counter += 1 self.task_id = RuntimeTask.task_counter - self.serialized_fnargs = dill.dumps(fnargs) + try: + self.serialized_fnargs = pickle.dumps(fnargs) + self.serialized_with_pickle = True + except Exception: + self.serialized_fnargs = dill.dumps(fnargs) + self.serialized_with_pickle = False + self._fnargs: tuple[Any, Any, Any] | None = None self._name = fnargs[0].__name__ """Tuple of function pointer, arguments, and keyword arguments.""" @@ -84,7 +91,10 @@ def __init__( def fnargs(self) -> tuple[Any, Any, Any]: """Return the function pointer, arguments, and keyword arguments.""" if self._fnargs is None: - self._fnargs = dill.loads(self.serialized_fnargs) + if self.serialized_with_pickle: + self._fnargs = pickle.loads(self.serialized_fnargs) + else: + self._fnargs = dill.loads(self.serialized_fnargs) assert self._fnargs is not None # for type checker return self._fnargs diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index c034c2ce2..f2590bfaf 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -162,8 +162,8 @@ def __init__(self, id: int, conn: Connection) -> None: self._tasks: dict[RuntimeAddress, RuntimeTask] = {} """Tracks all started, unfinished tasks on this worker.""" - self._delayed_tasks: list[RuntimeTask] = [] - """Store all delayed tasks in LIFO order.""" + # self._delayed_tasks: list[RuntimeTask] = [] + # """Store all delayed tasks in LIFO order.""" self._ready_task_ids: Queue[RuntimeAddress] = Queue() """Tasks queued up for execution.""" @@ -203,7 +203,7 @@ def 
record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: lvl = active_task.logging_level if lvl is None or lvl <= record.levelno: tid = active_task.comp_task_id - self._conn.send((RuntimeMessage.LOG, (tid, record))) + self._send(RuntimeMessage.LOG, (tid, record)) return record logging.setLogRecordFactory(record_factory) @@ -215,7 +215,7 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: _logger.debug('Started incoming thread.') # Communicate that this worker is ready - self._conn.send((RuntimeMessage.STARTED, self._id)) + self._send(RuntimeMessage.STARTED, self._id) def _loop(self) -> None: """Main worker event loop.""" @@ -228,7 +228,7 @@ def _loop(self) -> None: error_str = ''.join(traceback.format_exception(*exc_info)) _logger.error(error_str) try: - self._conn.send((RuntimeMessage.ERROR, error_str)) + self._send(RuntimeMessage.ERROR, error_str) except Exception: pass @@ -256,17 +256,17 @@ def recv_incoming(self) -> None: self._add_task(task) self.read_receipt_mutex.release() - elif msg == RuntimeMessage.SUBMIT_BATCH: - self.read_receipt_mutex.acquire() - tasks = cast(List[RuntimeTask], payload) - self.most_recent_read_submit = tasks[0].unique_id - self._add_task(tasks.pop()) # Submit one task - self._delayed_tasks.extend(tasks) # Delay rest - # Delayed tasks have no context and are stored (more-or-less) - # as a function pointer together with the arguments. - # When it gets started, it consumes much more memory, - # so we delay the task start until necessary (at no cost) - self.read_receipt_mutex.release() + # elif msg == RuntimeMessage.SUBMIT_BATCH: + # self.read_receipt_mutex.acquire() + # tasks = cast(List[RuntimeTask], payload) + # self.most_recent_read_submit = tasks[0].unique_id + # self._add_task(tasks.pop()) # Submit one task + # self._delayed_tasks.extend(tasks) # Delay rest + # # Delayed tasks have no context and are stored (more-or-less) + # # as a function pointer together with the arguments. 
+ # # When it gets started, it consumes much more memory, + # # so we delay the task start until necessary (at no cost) + # self.read_receipt_mutex.release() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) @@ -340,17 +340,17 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: } # Remove all tasks that are children of `addr` from delayed tasks - self._delayed_tasks = [ - t for t in self._delayed_tasks - if not t.is_descendant_of(addr) - ] + # self._delayed_tasks = [ + # t for t in self._delayed_tasks + # if not t.is_descendant_of(addr) + # ] def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise block.""" while True: - if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: - self._add_task(self._delayed_tasks.pop()) - continue + # if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: + # self._add_task(self._delayed_tasks.pop()) + # continue self.read_receipt_mutex.acquire() try: @@ -358,7 +358,7 @@ def _get_next_ready_task(self) -> RuntimeTask | None: except Empty: payload = (1, self.most_recent_read_submit) - self._conn.send((RuntimeMessage.WAITING, payload)) + self._send(RuntimeMessage.WAITING, payload) self.read_receipt_mutex.release() addr = self._ready_task_ids.get() @@ -412,7 +412,7 @@ def _try_step_next_ready_task(self) -> None: exc_info = sys.exc_info() error_str = ''.join(traceback.format_exception(*exc_info)) error_payload = (self._active_task.comp_task_id, error_str) - self._conn.send((RuntimeMessage.ERROR, error_payload)) + self._send(RuntimeMessage.ERROR, error_payload) finally: self._active_task = None @@ -456,11 +456,11 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: if task.return_address.worker_id == self._id: self._handle_result(packaged_result) - self._conn.send((RuntimeMessage.UPDATE, -1)) + self._send(RuntimeMessage.UPDATE, -1) # Let manager know this worker has one less task # without sending a result else: - self._conn.send((RuntimeMessage.RESULT, packaged_result)) + self._send(RuntimeMessage.RESULT, packaged_result) # Remove task self._tasks.pop(task.return_address) @@ -498,6 +498,12 @@ def _get_new_mailbox_id(self) -> int: self._mailbox_counter += 1 return new_id + def _send(self, msg: RuntimeMessage, payload: Any) -> None: + """Send a message to the boss.""" + _logger.debug(f'Sending message {msg.name}.') + _logger.log(1, f'Payload: {payload}') + self._conn.send((msg, payload)) + def submit( self, fn: Callable[..., Any], @@ -525,7 +531,7 @@ def submit( ) # Submit the task (on the next cycle) - self._conn.send((RuntimeMessage.SUBMIT, task)) + self._send(RuntimeMessage.SUBMIT, task) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -572,7 +578,7 @@ def map( ] # Submit the tasks - self._conn.send((RuntimeMessage.SUBMIT_BATCH, tasks)) + self._send(RuntimeMessage.SUBMIT_BATCH, tasks) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -588,7 +594,7 @@ def cancel(self, future: RuntimeFuture) -> None: for slot_id in range(num_slots) ] for addr in addrs: - self._conn.send((RuntimeMessage.CANCEL, addr)) + self._send(RuntimeMessage.CANCEL, addr) def get_cache(self) -> dict[str, Any]: """ diff --git a/bqskit/utils/cachedclass.py b/bqskit/utils/cachedclass.py index 751a361b6..7edac47b9 100644 --- a/bqskit/utils/cachedclass.py +++ b/bqskit/utils/cachedclass.py @@ -63,7 +63,8 @@ def __new__(cls: type[T], *args: Any, **kwargs: Any) -> T: _instances = cls._instances # type: ignore if 
_instances.get(key, None) is None: - _logger.debug( + _logger.log( + 1, ( 'Creating cached instance for class: %s,' ' with args %s, and kwargs %s' From 3afac43c91b23c905e90bb08e0a5812a1232564e Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Mon, 8 Apr 2024 17:11:57 -0700 Subject: [PATCH 029/197] Adding TreeScan pass --- bqskit/passes/__init__.py | 2 + bqskit/passes/processing/__init__.py | 2 + bqskit/passes/processing/treescan.py | 177 +++++++++++++++++++++++++++ 3 files changed, 181 insertions(+) create mode 100644 bqskit/passes/processing/treescan.py diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index eb8e41607..fd2b4ad8e 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -257,6 +257,7 @@ from bqskit.passes.processing.iterative import IterativeScanningGateRemovalPass from bqskit.passes.processing.scan import ScanningGateRemovalPass from bqskit.passes.processing.substitute import SubstitutePass +from bqskit.passes.processing.treescan import TreeScanningGateRemovalPass from bqskit.passes.retarget.auto import AutoRebase2QuditGatePass from bqskit.passes.retarget.general import GeneralSQDecomposition from bqskit.passes.retarget.two import Rebase2QuditGatePass @@ -329,6 +330,7 @@ 'UpdateDataPass', 'ToU3Pass', 'ScanningGateRemovalPass', + 'TreeScanningGateRemovalPass' 'SimpleLayerGenerator', 'AStarHeuristic', 'GreedyHeuristic', diff --git a/bqskit/passes/processing/__init__.py b/bqskit/passes/processing/__init__.py index e54966b93..675eb2268 100644 --- a/bqskit/passes/processing/__init__.py +++ b/bqskit/passes/processing/__init__.py @@ -5,10 +5,12 @@ from bqskit.passes.processing.iterative import IterativeScanningGateRemovalPass from bqskit.passes.processing.scan import ScanningGateRemovalPass from bqskit.passes.processing.substitute import SubstitutePass +from bqskit.passes.processing.treescan import TreeScanningGateRemovalPass __all__ = [ 'ExhaustiveGateRemovalPass', 'IterativeScanningGateRemovalPass', 'ScanningGateRemovalPass', 'SubstitutePass', + 'TreeScanningGateRemovalPass', ] diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py new file mode 100644 index 000000000..fde796cba --- /dev/null +++ b/bqskit/passes/processing/treescan.py @@ -0,0 +1,177 @@ +"""This module implements the ScanningGateRemovalPass.""" +from __future__ import annotations + +import logging +from typing import Any +from typing import Callable + +from bqskit.compiler.basepass import BasePass +from bqskit.compiler.passdata import PassData +from bqskit.ir.circuit import Circuit +from bqskit.ir.operation import Operation +from bqskit.ir.opt.cost.functions import HilbertSchmidtResidualsGenerator +from bqskit.ir.opt.cost.generator import CostFunctionGenerator +from bqskit.utils.typing import is_real_number +from bqskit.runtime import get_runtime + +_logger = logging.getLogger(__name__) + + +class TreeScanningGateRemovalPass(BasePass): + """ + The ScanningGateRemovalPass class. + + Starting from one side of the circuit, attempt to remove gates one-by-one. + """ + + def __init__( + self, + start_from_left: bool = True, + success_threshold: float = 1e-8, + cost: CostFunctionGenerator = HilbertSchmidtResidualsGenerator(), + instantiate_options: dict[str, Any] = {}, + tree_depth: int = 1, + collection_filter: Callable[[Operation], bool] | None = None, + ) -> None: + """ + Construct a ScanningGateRemovalPass. + + Args: + start_from_left (bool): Determines where the scan starts + attempting to remove gates from. 
If True, scan goes left + to right, otherwise right to left. (Default: True) + + success_threshold (float): The distance threshold that + determines successful termintation. Measured in cost + described by the hilbert schmidt cost function. + (Default: 1e-8) + + cost (CostFunction | None): The cost function that determines + successful removal of a gate. + (Default: HilbertSchmidtResidualsGenerator()) + + instantiate_options (dict[str: Any]): Options passed directly + to circuit.instantiate when instantiating circuit + templates. (Default: {}) + + tree_depth (int): The depth of the tree of potential + solutions to instantiate. Note that 2^(tree_depth) - 1 + circuits will be instantiated in parallel. + + collection_filter (Callable[[Operation], bool] | None): + A predicate that determines which operations should be + attempted to be removed. Called with each operation + in the circuit. If this returns true, this pass will + attempt to remove that operation. Defaults to all + operations. + """ + + if not is_real_number(success_threshold): + raise TypeError( + 'Expected real number for success_threshold' + ', got %s' % type(success_threshold), + ) + + if not isinstance(cost, CostFunctionGenerator): + raise TypeError( + 'Expected cost to be a CostFunctionGenerator, got %s' + % type(cost), + ) + + if not isinstance(instantiate_options, dict): + raise TypeError( + 'Expected dictionary for instantiate_options, got %s.' + % type(instantiate_options), + ) + + self.collection_filter = collection_filter or default_collection_filter + + if not callable(self.collection_filter): + raise TypeError( + 'Expected callable method that maps Operations to booleans for' + ' collection_filter, got %s.' % type(self.collection_filter), + ) + + self.tree_depth = tree_depth + self.start_from_left = start_from_left + self.success_threshold = success_threshold + self.cost = cost + self.instantiate_options: dict[str, Any] = { + 'dist_tol': self.success_threshold, + 'min_iters': 100, + 'cost_fn_gen': self.cost, + } + self.instantiate_options.update(instantiate_options) + + # Implement recursively for now, if slow then fix + def get_tree_circs(orig_num_cycles, circuit_copy: Circuit, cycle_and_ops: list[tuple[int, Operation]]) -> list[Circuit]: + all_circs = [circuit_copy.copy()] + for cycle, op in cycle_and_ops: + new_circs = [] + for circ in all_circs: + idx_shift = orig_num_cycles - circ.num_cycles + new_cycle = cycle - idx_shift + work_copy = circ.copy() + work_copy.pop((new_cycle, op.location[0])) + new_circs.append(work_copy) + new_circs.append(circ) + + all_circs = new_circs + + all_circs = sorted(all_circs, key= lambda x: x.num_operations) + + return all_circs + + + + async def run(self, circuit: Circuit, data: PassData) -> None: + """Perform the pass's operation, see :class:`BasePass` for more.""" + instantiate_options = self.instantiate_options.copy() + if 'seed' not in instantiate_options: + instantiate_options['seed'] = data.seed + + start = 'left' if self.start_from_left else 'right' + _logger.debug(f'Starting scanning gate removal on the {start}.') + + target = self.get_target(circuit, data) + # target = None + + circuit_copy = circuit.copy() + reverse_iter = not self.start_from_left + + ops_left = list(circuit.operations_with_cycles(reverse=reverse_iter)) + print(f"Starting Scan with tree depth {self.tree_depth} on circuit with {len(ops_left)} gates") + + while ops_left: + chunk, ops_left = ops_left[:self.tree_depth], ops_left[self.tree_depth:] + + # Circuits of size 2 ** tree_depth - 1, + # ranked in order 
of most to fewest deletions + all_circs = TreeScanningGateRemovalPass.get_tree_circs(circuit.num_cycles, circuit_copy, chunk) + # Remove circuit with no gates deleted + all_circs = all_circs[:-1] + + _logger.debug(f'Attempting removal of operation of {self.tree_depth} operations.') + + instantiated_circuits = await get_runtime().map( + Circuit.instantiate, + all_circs, + target=target, + **instantiate_options, + ) + + dists = [self.cost(c, target) for c in instantiated_circuits] + _logger.debug(f'Circuit distances: {dists}') + + # Pick least count with least dist + for i, dist in enumerate(dists): + if dist < self.success_threshold: + _logger.debug(f"Successfully switched to circuit {i} of {2 ** self.tree_depth}.") + circuit_copy = instantiated_circuits[i] + break + + circuit.become(circuit_copy) + + +def default_collection_filter(op: Operation) -> bool: + return True \ No newline at end of file From 84a2bc1fc71d3c8a162981e0c9d510b12de5590c Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Mon, 8 Apr 2024 17:30:04 -0700 Subject: [PATCH 030/197] Reverting .gitignore --- .gitignore | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 8bcc07d04..af49a7c0c 100644 --- a/.gitignore +++ b/.gitignore @@ -14,12 +14,4 @@ dask-worker-space .ipynb_checkpoints dist build -scratch/ -**/*.pickle -**/*.data -**/*.pkl -**/*.log -**/*.txt -**/*.sh -**/*.qasm - +scratch/ \ No newline at end of file From ef89038b49cc1d694518d12d5472b096476bfbd5 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Mon, 8 Apr 2024 17:30:57 -0700 Subject: [PATCH 031/197] Actually reverting .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index af49a7c0c..18cb8929a 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,4 @@ dask-worker-space .ipynb_checkpoints dist build -scratch/ \ No newline at end of file +scratch/ From bb99350abe9fb9e91599147997e3cb19254703e4 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Mon, 8 Apr 2024 18:03:35 -0700 Subject: [PATCH 032/197] tox --- bqskit/passes/__init__.py | 2 +- bqskit/passes/processing/treescan.py | 57 +++++++++++++++++----------- 2 files changed, 36 insertions(+), 23 deletions(-) diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index fd2b4ad8e..f2581e8f9 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -330,7 +330,7 @@ 'UpdateDataPass', 'ToU3Pass', 'ScanningGateRemovalPass', - 'TreeScanningGateRemovalPass' + 'TreeScanningGateRemovalPass', 'SimpleLayerGenerator', 'AStarHeuristic', 'GreedyHeuristic', diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index fde796cba..0fe0063ef 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -11,8 +11,8 @@ from bqskit.ir.operation import Operation from bqskit.ir.opt.cost.functions import HilbertSchmidtResidualsGenerator from bqskit.ir.opt.cost.generator import CostFunctionGenerator -from bqskit.utils.typing import is_real_number from bqskit.runtime import get_runtime +from bqskit.utils.typing import is_real_number _logger = logging.getLogger(__name__) @@ -54,9 +54,9 @@ def __init__( to circuit.instantiate when instantiating circuit templates. (Default: {}) - tree_depth (int): The depth of the tree of potential - solutions to instantiate. Note that 2^(tree_depth) - 1 - circuits will be instantiated in parallel. + tree_depth (int): The depth of the tree of potential + solutions to instantiate. 
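The get_tree_circs helper above enumerates, for a chunk of tree_depth candidate gates, every keep/delete combination and orders them from most to fewest deletions; the pass then drops the no-deletion entry and accepts the first candidate whose instantiation lands under the threshold. A standalone sketch of that enumeration, with plain lists standing in for circuits:

from itertools import combinations

def deletion_candidates(gates, depth):
    # All ways to delete a subset of the first `depth` gates, most deletions first.
    chunk, rest = gates[:depth], gates[depth:]
    options = []
    for r in range(depth, -1, -1):  # delete r gates, r = depth .. 0
        for removed in combinations(range(depth), r):
            kept = [g for i, g in enumerate(chunk) if i not in removed]
            options.append(kept + rest)
    return options

cands = deletion_candidates(['cx0', 'u3a', 'cx1'], 2)
print(len(cands))  # 4 == 2 ** 2 candidates before dropping the no-deletion one
print(cands[0])    # ['cx1'], both gates in the chunk removed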
Note that 2^(tree_depth) - 1 + circuits will be instantiated in parallel. collection_filter (Callable[[Operation], bool] | None): A predicate that determines which operations should be @@ -103,8 +103,12 @@ def __init__( } self.instantiate_options.update(instantiate_options) - # Implement recursively for now, if slow then fix - def get_tree_circs(orig_num_cycles, circuit_copy: Circuit, cycle_and_ops: list[tuple[int, Operation]]) -> list[Circuit]: + @staticmethod + def get_tree_circs( + orig_num_cycles: int, circuit_copy: Circuit, + cycle_and_ops: list[tuple[int, Operation]], + ) -> list[Circuit]: + # Implement recursively for now, if slow then fix all_circs = [circuit_copy.copy()] for cycle, op in cycle_and_ops: new_circs = [] @@ -118,12 +122,10 @@ def get_tree_circs(orig_num_cycles, circuit_copy: Circuit, cycle_and_ops: list[t all_circs = new_circs - all_circs = sorted(all_circs, key= lambda x: x.num_operations) + all_circs = sorted(all_circs, key=lambda x: x.num_operations) return all_circs - - async def run(self, circuit: Circuit, data: PassData) -> None: """Perform the pass's operation, see :class:`BasePass` for more.""" instantiate_options = self.instantiate_options.copy() @@ -140,33 +142,44 @@ async def run(self, circuit: Circuit, data: PassData) -> None: reverse_iter = not self.start_from_left ops_left = list(circuit.operations_with_cycles(reverse=reverse_iter)) - print(f"Starting Scan with tree depth {self.tree_depth} on circuit with {len(ops_left)} gates") + print( + f'Starting Scan with tree depth {self.tree_depth}' + ' on circuit with {len(ops_left)} gates', + ) while ops_left: - chunk, ops_left = ops_left[:self.tree_depth], ops_left[self.tree_depth:] + chunk = ops_left[:self.tree_depth] + ops_left = ops_left[self.tree_depth:] - # Circuits of size 2 ** tree_depth - 1, + # Circuits of size 2 ** tree_depth - 1, # ranked in order of most to fewest deletions - all_circs = TreeScanningGateRemovalPass.get_tree_circs(circuit.num_cycles, circuit_copy, chunk) + all_circs = TreeScanningGateRemovalPass.get_tree_circs( + circuit.num_cycles, circuit_copy, chunk, + ) # Remove circuit with no gates deleted all_circs = all_circs[:-1] - _logger.debug(f'Attempting removal of operation of {self.tree_depth} operations.') + _logger.debug( + 'Attempting removal of operation of' + f' {self.tree_depth} operations.', + ) instantiated_circuits = await get_runtime().map( - Circuit.instantiate, - all_circs, - target=target, - **instantiate_options, + Circuit.instantiate, + all_circs, + target=target, + **instantiate_options, ) - + dists = [self.cost(c, target) for c in instantiated_circuits] - _logger.debug(f'Circuit distances: {dists}') # Pick least count with least dist for i, dist in enumerate(dists): if dist < self.success_threshold: - _logger.debug(f"Successfully switched to circuit {i} of {2 ** self.tree_depth}.") + _logger.debug( + f'Successfully switched to circuit {i}' + ' of {2 ** self.tree_depth}.', + ) circuit_copy = instantiated_circuits[i] break @@ -174,4 +187,4 @@ async def run(self, circuit: Circuit, data: PassData) -> None: def default_collection_filter(op: Operation) -> bool: - return True \ No newline at end of file + return True From 401a4ca9cd6490134cb38036f98f865fab52f561 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 11 Apr 2024 15:44:56 -0400 Subject: [PATCH 033/197] Revert "Scheduling change" This reverts commit f71e63d8fc837748614de63e01e9b537c219bfc5. 
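The revert below restores the batch-submit handling the worker used before the scheduling change (visible commented out above): start one task from an incoming batch immediately and keep the rest as cheap delayed entries, promoted LIFO when the ready queue runs dry. A standalone sketch of that policy with integers standing in for tasks:

tasks = list(range(5))   # an incoming batch
ready: list[int] = []    # tasks started and waiting to be stepped
delayed: list[int] = []  # not-yet-started tasks, popped LIFO

# On SUBMIT_BATCH: start one task, delay the rest (cheap to hold, costly to start).
ready.append(tasks.pop())
delayed.extend(tasks)

while ready or delayed:
    if not ready:
        ready.append(delayed.pop())  # promote the most recently delayed task
    print('stepping task', ready.pop(0))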
--- bqskit/runtime/attached.py | 3 - bqskit/runtime/base.py | 115 +++--------------------------------- bqskit/runtime/manager.py | 3 - bqskit/runtime/task.py | 14 +---- bqskit/runtime/worker.py | 66 ++++++++++----------- bqskit/utils/cachedclass.py | 3 +- 6 files changed, 42 insertions(+), 162 deletions(-) diff --git a/bqskit/runtime/attached.py b/bqskit/runtime/attached.py index 1590614c6..f68be0ee3 100644 --- a/bqskit/runtime/attached.py +++ b/bqskit/runtime/attached.py @@ -99,9 +99,6 @@ def __init__( num_blas_threads, ) - self.schedule_tasks = self.schedule_for_workers # type: ignore - self.handle_waiting = self.handle_direct_worker_waiting # type: ignore - def handle_disconnect(self, conn: Connection) -> None: """A client disconnect in attached mode is equal to a shutdown.""" self.handle_shutdown() diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 0d0f010eb..1cb53c494 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -101,49 +101,6 @@ def get_num_of_tasks_sent_since( raise RuntimeError('Read receipt not found in submit cache.') -class MultiLevelQueue: - """A multi-level queue for delaying submitted tasks.""" - - def __init__(self) -> None: - """Initialize the multi-level queue.""" - self.queue: dict[int, list[RuntimeTask]] = {} - self.levels: int | None = None - - def delay(self, tasks: Sequence[RuntimeTask]) -> None: - """Update the multi-level queue with tasks.""" - for task in tasks: - task_depth = len(task.breadcrumbs) - - if task_depth not in self.queue: - if self.levels is None or task_depth > self.levels: - self.levels = task_depth - self.queue[task_depth] = [] - - self.queue[task_depth].append(task) - - def empty(self) -> bool: - """Return True if the multi-level queue is empty.""" - return self.levels is None - - def pop(self) -> RuntimeTask: - """Pop the next task from the multi-level queue.""" - if self.empty(): - raise RuntimeError('Cannot pop from an empty multi-level queue.') - - task = self.queue[self.levels].pop() # type: ignore # checked above - - while self.levels is not None: - if self.levels in self.queue: - if len(self.queue[self.levels]) != 0: - break - self.queue.pop(self.levels) - self.levels -= 1 - if self.levels < 0: - self.levels = None - - return task - - def sigint_handler(signum: int, _: FrameType | None, node: ServerBase) -> None: """Interrupt the node.""" if not node.running: @@ -187,9 +144,6 @@ def __init__(self) -> None: self.conn_to_employee_dict: dict[Connection, RuntimeEmployee] = {} """Used to find the employee associated with a message.""" - self.multi_level_queue = MultiLevelQueue() - """Used to delay tasks until they can be scheduled.""" - # Servers do not need blas threads set_blas_thread_counts(1) @@ -625,47 +579,18 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: reverse=True, ) for e, assignment in sorted_assignments: - self.send_tasks_to_employee(e, assignment) - - self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) + num_tasks = len(assignment) - def schedule_for_workers(self, tasks: Sequence[RuntimeTask]) -> None: - """Schedule tasks for workers, return the amount assigned.""" - if len(tasks) == 0: - return + if num_tasks == 0: + continue - num_assigned_tasks = 0 - for e in self.employees: - # TODO: randomize? prioritize workers idle but with less tasks? 
- if num_assigned_tasks >= len(tasks): - break + self.outgoing.put((e.conn, RuntimeMessage.SUBMIT_BATCH, assignment)) - if e.num_idle_workers > 0: - task = tasks[num_assigned_tasks] - self.send_tasks_to_employee(e, [task]) - num_assigned_tasks += 1 + e.num_tasks += num_tasks + e.num_idle_workers -= min(num_tasks, e.num_idle_workers) + e.submit_cache.append((assignment[0].unique_id, num_tasks)) self.num_idle_workers = sum(e.num_idle_workers for e in self.employees) - self.multi_level_queue.delay(tasks[num_assigned_tasks:]) - - def send_tasks_to_employee( - self, - e: RuntimeEmployee, - tasks: Sequence[RuntimeTask], - ) -> None: - """Send the `task` to the employee responsible for `worker_id`.""" - num_tasks = len(tasks) - - if num_tasks == 0: - return - - e.num_tasks += num_tasks - e.num_idle_workers -= min(num_tasks, e.num_idle_workers) - e.submit_cache.append((tasks[0].unique_id, num_tasks)) - if num_tasks == 1: - self.outgoing.put((e.conn, RuntimeMessage.SUBMIT, tasks[0])) - else: - self.outgoing.put((e.conn, RuntimeMessage.SUBMIT_BATCH, tasks)) def send_result_down(self, result: RuntimeResult) -> None: """Send the `result` to the appropriate employee.""" @@ -716,36 +641,14 @@ def handle_waiting( idle count by the number of tasks sent since the read receipt. """ employee = self.conn_to_employee_dict[conn] - unaccounted_tasks = employee.get_num_of_tasks_sent_since(read_receipt) - adjusted_idle_count = max(new_idle_count - unaccounted_tasks, 0) + unaccounted_task = employee.get_num_of_tasks_sent_since(read_receipt) + adjusted_idle_count = max(new_idle_count - unaccounted_task, 0) old_count = employee.num_idle_workers employee.num_idle_workers = adjusted_idle_count self.num_idle_workers += (adjusted_idle_count - old_count) assert 0 <= self.num_idle_workers <= self.total_workers - def handle_direct_worker_waiting( - self, - conn: Connection, - new_idle_count: int, - read_receipt: RuntimeAddress | None, - ) -> None: - """ - Record that a worker is idle with nothing to do. - - Schedule tasks from the multi-level queue to the worker. 
- """ - ServerBase.handle_waiting(self, conn, new_idle_count, read_receipt) - - if self.multi_level_queue.empty(): - return - - employee = self.conn_to_employee_dict[conn] - if employee.num_idle_workers > 0: - task = self.multi_level_queue.pop() - self.send_tasks_to_employee(employee, [task]) - self.num_idle_workers -= 1 - def parse_ipports(ipports_str: Sequence[str]) -> list[tuple[str, int]]: """Parse command line ip and port inputs.""" diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 0283be6c4..507cdf9a3 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -124,9 +124,6 @@ def __init__( num_blas_threads, ) - self.schedule_tasks = self.schedule_for_workers # type: ignore - self.handle_waiting = self.handle_direct_worker_waiting # type: ignore # noqa: E501 - # Case 2: Connect to detached managers at ipports else: self.connect_to_managers(ipports) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index ca0feefa7..962ca1ff6 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -3,7 +3,6 @@ import inspect import logging -import pickle from typing import Any from typing import Coroutine @@ -42,13 +41,7 @@ def __init__( RuntimeTask.task_counter += 1 self.task_id = RuntimeTask.task_counter - try: - self.serialized_fnargs = pickle.dumps(fnargs) - self.serialized_with_pickle = True - except Exception: - self.serialized_fnargs = dill.dumps(fnargs) - self.serialized_with_pickle = False - + self.serialized_fnargs = dill.dumps(fnargs) self._fnargs: tuple[Any, Any, Any] | None = None self._name = fnargs[0].__name__ """Tuple of function pointer, arguments, and keyword arguments.""" @@ -91,10 +84,7 @@ def __init__( def fnargs(self) -> tuple[Any, Any, Any]: """Return the function pointer, arguments, and keyword arguments.""" if self._fnargs is None: - if self.serialized_with_pickle: - self._fnargs = pickle.loads(self.serialized_fnargs) - else: - self._fnargs = dill.loads(self.serialized_fnargs) + self._fnargs = dill.loads(self.serialized_fnargs) assert self._fnargs is not None # for type checker return self._fnargs diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index f2590bfaf..c034c2ce2 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -162,8 +162,8 @@ def __init__(self, id: int, conn: Connection) -> None: self._tasks: dict[RuntimeAddress, RuntimeTask] = {} """Tracks all started, unfinished tasks on this worker.""" - # self._delayed_tasks: list[RuntimeTask] = [] - # """Store all delayed tasks in LIFO order.""" + self._delayed_tasks: list[RuntimeTask] = [] + """Store all delayed tasks in LIFO order.""" self._ready_task_ids: Queue[RuntimeAddress] = Queue() """Tasks queued up for execution.""" @@ -203,7 +203,7 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: lvl = active_task.logging_level if lvl is None or lvl <= record.levelno: tid = active_task.comp_task_id - self._send(RuntimeMessage.LOG, (tid, record)) + self._conn.send((RuntimeMessage.LOG, (tid, record))) return record logging.setLogRecordFactory(record_factory) @@ -215,7 +215,7 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: _logger.debug('Started incoming thread.') # Communicate that this worker is ready - self._send(RuntimeMessage.STARTED, self._id) + self._conn.send((RuntimeMessage.STARTED, self._id)) def _loop(self) -> None: """Main worker event loop.""" @@ -228,7 +228,7 @@ def _loop(self) -> None: error_str = ''.join(traceback.format_exception(*exc_info)) _logger.error(error_str) try: - 
self._send(RuntimeMessage.ERROR, error_str) + self._conn.send((RuntimeMessage.ERROR, error_str)) except Exception: pass @@ -256,17 +256,17 @@ def recv_incoming(self) -> None: self._add_task(task) self.read_receipt_mutex.release() - # elif msg == RuntimeMessage.SUBMIT_BATCH: - # self.read_receipt_mutex.acquire() - # tasks = cast(List[RuntimeTask], payload) - # self.most_recent_read_submit = tasks[0].unique_id - # self._add_task(tasks.pop()) # Submit one task - # self._delayed_tasks.extend(tasks) # Delay rest - # # Delayed tasks have no context and are stored (more-or-less) - # # as a function pointer together with the arguments. - # # When it gets started, it consumes much more memory, - # # so we delay the task start until necessary (at no cost) - # self.read_receipt_mutex.release() + elif msg == RuntimeMessage.SUBMIT_BATCH: + self.read_receipt_mutex.acquire() + tasks = cast(List[RuntimeTask], payload) + self.most_recent_read_submit = tasks[0].unique_id + self._add_task(tasks.pop()) # Submit one task + self._delayed_tasks.extend(tasks) # Delay rest + # Delayed tasks have no context and are stored (more-or-less) + # as a function pointer together with the arguments. + # When it gets started, it consumes much more memory, + # so we delay the task start until necessary (at no cost) + self.read_receipt_mutex.release() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) @@ -340,17 +340,17 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: } # Remove all tasks that are children of `addr` from delayed tasks - # self._delayed_tasks = [ - # t for t in self._delayed_tasks - # if not t.is_descendant_of(addr) - # ] + self._delayed_tasks = [ + t for t in self._delayed_tasks + if not t.is_descendant_of(addr) + ] def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise block.""" while True: - # if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: - # self._add_task(self._delayed_tasks.pop()) - # continue + if self._ready_task_ids.empty() and len(self._delayed_tasks) > 0: + self._add_task(self._delayed_tasks.pop()) + continue self.read_receipt_mutex.acquire() try: @@ -358,7 +358,7 @@ def _get_next_ready_task(self) -> RuntimeTask | None: except Empty: payload = (1, self.most_recent_read_submit) - self._send(RuntimeMessage.WAITING, payload) + self._conn.send((RuntimeMessage.WAITING, payload)) self.read_receipt_mutex.release() addr = self._ready_task_ids.get() @@ -412,7 +412,7 @@ def _try_step_next_ready_task(self) -> None: exc_info = sys.exc_info() error_str = ''.join(traceback.format_exception(*exc_info)) error_payload = (self._active_task.comp_task_id, error_str) - self._send(RuntimeMessage.ERROR, error_payload) + self._conn.send((RuntimeMessage.ERROR, error_payload)) finally: self._active_task = None @@ -456,11 +456,11 @@ def _process_task_completion(self, task: RuntimeTask, result: Any) -> None: if task.return_address.worker_id == self._id: self._handle_result(packaged_result) - self._send(RuntimeMessage.UPDATE, -1) + self._conn.send((RuntimeMessage.UPDATE, -1)) # Let manager know this worker has one less task # without sending a result else: - self._send(RuntimeMessage.RESULT, packaged_result) + self._conn.send((RuntimeMessage.RESULT, packaged_result)) # Remove task self._tasks.pop(task.return_address) @@ -498,12 +498,6 @@ def _get_new_mailbox_id(self) -> int: self._mailbox_counter += 1 return new_id - def _send(self, msg: RuntimeMessage, payload: Any) -> None: - """Send a message to the boss.""" - 
_logger.debug(f'Sending message {msg.name}.') - _logger.log(1, f'Payload: {payload}') - self._conn.send((msg, payload)) - def submit( self, fn: Callable[..., Any], @@ -531,7 +525,7 @@ def submit( ) # Submit the task (on the next cycle) - self._send(RuntimeMessage.SUBMIT, task) + self._conn.send((RuntimeMessage.SUBMIT, task)) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -578,7 +572,7 @@ def map( ] # Submit the tasks - self._send(RuntimeMessage.SUBMIT_BATCH, tasks) + self._conn.send((RuntimeMessage.SUBMIT_BATCH, tasks)) # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) @@ -594,7 +588,7 @@ def cancel(self, future: RuntimeFuture) -> None: for slot_id in range(num_slots) ] for addr in addrs: - self._send(RuntimeMessage.CANCEL, addr) + self._conn.send((RuntimeMessage.CANCEL, addr)) def get_cache(self) -> dict[str, Any]: """ diff --git a/bqskit/utils/cachedclass.py b/bqskit/utils/cachedclass.py index 7edac47b9..751a361b6 100644 --- a/bqskit/utils/cachedclass.py +++ b/bqskit/utils/cachedclass.py @@ -63,8 +63,7 @@ def __new__(cls: type[T], *args: Any, **kwargs: Any) -> T: _instances = cls._instances # type: ignore if _instances.get(key, None) is None: - _logger.log( - 1, + _logger.debug( ( 'Creating cached instance for class: %s,' ' with args %s, and kwargs %s' From a5bb9effcfadf2115057dcc2e85a80196138a5db Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 12 Apr 2024 09:14:22 -0400 Subject: [PATCH 034/197] Fixed 3.8 and 3.9 worker log issue --- bqskit/runtime/worker.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index c034c2ce2..8aa0b4ce7 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -209,9 +209,9 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: logging.setLogRecordFactory(record_factory) # Start incoming thread - self.incomming_thread = Thread(target=self.recv_incoming) - self.incomming_thread.daemon = True - self.incomming_thread.start() + self.incoming_thread = Thread(target=self.recv_incoming) + self.incoming_thread.daemon = True + self.incoming_thread.start() _logger.debug('Started incoming thread.') # Communicate that this worker is ready @@ -241,6 +241,7 @@ def recv_incoming(self) -> None: except Exception: _logger.debug('Crashed due to lost connection') os.kill(os.getpid(), signal.SIGKILL) + exit() _logger.debug(f'Received message {msg.name}.') _logger.log(1, f'Payload: {payload}') @@ -675,6 +676,7 @@ def start_worker( # If id isn't provided, wait for assignment if w_id is None: msg, w_id = conn.recv() + assert isinstance(w_id, int) assert msg == RuntimeMessage.STARTED # Set up runtime logging @@ -684,9 +686,9 @@ def start_worker( _handler = logging.StreamHandler() _handler.setLevel(0) _fmt_header = '%(asctime)s.%(msecs)03d - %(levelname)-8s |' - _fmt_message = ' [wid=%(wid)s]: %(message)s' + _fmt_message = f' [wid={w_id}]: %(message)s' _fmt = _fmt_header + _fmt_message - _formatter = logging.Formatter(_fmt, '%H:%M:%S', defaults={'wid': w_id}) + _formatter = logging.Formatter(_fmt, '%H:%M:%S') _handler.setFormatter(_formatter) _runtime_logger.addHandler(_handler) From 4189b237a34cbaf3e474753a20be167b39b9bc1c Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 12 Apr 2024 13:04:46 -0400 Subject: [PATCH 035/197] Reduce CachedClass logging --- bqskit/utils/cachedclass.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bqskit/utils/cachedclass.py b/bqskit/utils/cachedclass.py index 
751a361b6..7edac47b9 100644 --- a/bqskit/utils/cachedclass.py +++ b/bqskit/utils/cachedclass.py @@ -63,7 +63,8 @@ def __new__(cls: type[T], *args: Any, **kwargs: Any) -> T: _instances = cls._instances # type: ignore if _instances.get(key, None) is None: - _logger.debug( + _logger.log( + 1, ( 'Creating cached instance for class: %s,' ' with args %s, and kwargs %s' From d6eb2be702bcc8f6a9d27a4c0e8ec5cdac79c982 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 12 Apr 2024 13:05:05 -0400 Subject: [PATCH 036/197] Add log context capabilities to runtime --- bqskit/runtime/__init__.py | 116 ++++++++++++++++++++++++++++++++++++- bqskit/runtime/task.py | 9 +-- bqskit/runtime/worker.py | 56 ++++++++++++++++++ 3 files changed, 175 insertions(+), 6 deletions(-) diff --git a/bqskit/runtime/__init__.py b/bqskit/runtime/__init__.py index 2c01c4ddd..371427ffd 100644 --- a/bqskit/runtime/__init__.py +++ b/bqskit/runtime/__init__.py @@ -98,6 +98,7 @@ from typing import Any from typing import Callable from typing import Protocol +from typing import Sequence from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -143,18 +144,129 @@ def submit( self, fn: Callable[..., Any], *args: Any, + task_name: str | None = None, + log_context: dict[str, str] = {}, **kwargs: Any, ) -> RuntimeFuture: - """Submit a `fn` to the runtime.""" + """ + Submit a function to the runtime for execution. + + This method schedules the function `fn` to be executed by the + runtime with the provided arguments `args` and keyword arguments + `kwargs`. The execution may happen asynchronously. + + Args: + fn (Callable[..., Any]): The function to be executed. + + *args (Any): Variable length argument list to be passed to + the function `fn`. + + task_name (str | None): An optional name for the task, which + can be used for logging or tracking purposes. Defaults to + None, which will use the function name as the task name. + + log_context (dict[str, str]): A dictionary containing logging + context information. All log messages produced by the fn + and any children tasks will contain this context if the + appropriate level (logging.DEBUG) is set on the logger. + Defaults to an empty dictionary for no added context. + + **kwargs (Any): Arbitrary keyword arguments to be passed to + the function `fn`. + + Returns: + RuntimeFuture: An object representing the future result of + the function execution. This can be used to retrieve the + result by `await`ing it. + + Example: + >>> from bqskit.runtime import get_runtime + >>> + >>> def add(x, y): + ... return x + y + >>> + >>> future = get_runtime().submit(add, 1, 2) + >>> result = await future + >>> print(result) + 3 + + See Also: + - :func:`map` for submitting multiple tasks in parallel. + - :func:`cancel` for cancelling tasks. + - :class:`~bqskit.runtime.future.RuntimeFuture` for more + information on how to interact with the future object. + """ ... def map( self, fn: Callable[..., Any], *args: Any, + task_name: Sequence[str | None] | str | None = None, + log_context: Sequence[dict[str, str]] | dict[str, str] = {}, **kwargs: Any, ) -> RuntimeFuture: - """Map `fn` over the input arguments distributed across the runtime.""" + """ + Map a function over a sequence of arguments and execute in parallel. + + This method schedules the function `fn` to be executed by the runtime + for each set of arguments provided in `args`. Each invocation of `fn` + will be executed potentially in parallel, depending on the runtime's + capabilities and current load. 
+ + Args: + fn (Callable[..., Any]): The function to be executed. + + *args (Any): Variable length argument list to be passed to + the function `fn`. Each argument is expected to be a + sequence of arguments to be passed to a separate + invocation. The sequences should be of equal length. + + task_name (Sequence[str | None] | str | None): An optional + name for the task group, which can be used for logging + or tracking purposes. Defaults to None, which will use + the function name as the task name. If a string is + provided, it will be used as the prefix for all task + names. If a sequence of strings is provided, each task + will be named with the corresponding string in the + sequence. + + log_context (Sequence[dict[str, str]]) | dict[str, str]): A + dictionary containing logging context information. All + log messages produced by the `fn` and any children tasks + will contain this context if the appropriate level + (logging.DEBUG) is set on the logger. Defaults to an + empty dictionary for no added context. Can be a sequence + of contexts, one for each task, or a single context to be + used for all tasks. + + **kwargs (Any): Arbitrary keyword arguments to be passed to + each invocation of the function `fn`. + + Returns: + RuntimeFuture: An object representing the future result of + the function executions. This can be used to retrieve the + results by `await`ing it, which will return a list. + + Example: + >>> from bqskit.runtime import get_runtime + >>> + >>> def add(x, y): + ... return x + y + >>> + >>> args_list = [(1, 2, 3), (4, 5, 6)] + >>> future = get_runtime().map(add, *args_list) + >>> results = await future + >>> print(results) + [5, 7, 9] + + See Also: + - :func:`submit` for submitting a single task. + - :func:`cancel` for cancelling tasks. + - :func:`next` for retrieving results incrementally. + - :class:`~bqskit.runtime.future.RuntimeFuture` for more + information on how to interact with the future object. + """ ... 
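A minimal sketch of the two keywords documented above (not taken from the patch series): the helper names are made up for illustration, and the call is assumed to run from code already executing on the runtime, e.g. inside a pass's run method, since get_runtime() is only meaningful there.

    from bqskit.runtime import get_runtime

    def add(x: int, y: int) -> int:
        return x + y

    async def labelled_adds() -> list[int]:
        # Three parallel add tasks, each with its own task name and log context.
        xs, ys = (1, 3, 5), (2, 4, 6)
        return await get_runtime().map(
            add, xs, ys,
            task_name=[f'add_{i}' for i in range(3)],
            log_context=[{'pair': str(i)} for i in range(3)],
        )

Log records emitted inside add (and any child tasks it spawns) would then carry its pair=0/1/2 context whenever the task's logging level is DEBUG or lower.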
def cancel(self, future: RuntimeFuture) -> None: diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 962ca1ff6..d74d79ec4 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -36,6 +36,8 @@ def __init__( breadcrumbs: tuple[RuntimeAddress, ...], logging_level: int | None = None, max_logging_depth: int = -1, + task_name: str | None = None, + log_context: dict[str, str] = {}, ) -> None: """Create the task with a new id and return address.""" RuntimeTask.task_counter += 1 @@ -43,7 +45,7 @@ def __init__( self.serialized_fnargs = dill.dumps(fnargs) self._fnargs: tuple[Any, Any, Any] | None = None - self._name = fnargs[0].__name__ + self._name = fnargs[0].__name__ if task_name is None else task_name """Tuple of function pointer, arguments, and keyword arguments.""" self.return_address = return_address @@ -68,9 +70,6 @@ def __init__( self.coro: Coroutine[Any, Any, Any] | None = None """The coroutine containing this tasks code.""" - # self.send: Any = None - # """A register that both the coroutine and task have access to.""" - self.desired_box_id: int | None = None """When waiting on a mailbox, this stores that mailbox's id.""" @@ -80,6 +79,8 @@ def __init__( self.wake_on_next: bool = False """Set to true if this task should wake immediately on a result.""" + self.log_context: dict[str, str] = log_context + @property def fnargs(self) -> tuple[Any, Any, Any]: """Return the function pointer, arguments, and keyword arguments.""" diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 8aa0b4ce7..dee3bdca7 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -20,6 +20,7 @@ from typing import Callable from typing import cast from typing import List +from typing import Sequence from bqskit.runtime import default_worker_port from bqskit.runtime import set_blas_thread_counts @@ -202,6 +203,14 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: if active_task is not None: lvl = active_task.logging_level if lvl is None or lvl <= record.levelno: + if lvl <= logging.DEBUG: + record.msg += f' [wid={self._id}' + items = active_task.log_context.items() + if len(items) > 0: + record.msg += ', ' + con_str = ', '.join(f'{k}={v}' for k, v in items) + record.msg += con_str + record.msg += ']' tid = active_task.comp_task_id self._conn.send((RuntimeMessage.LOG, (tid, record))) return record @@ -503,10 +512,25 @@ def submit( self, fn: Callable[..., Any], *args: Any, + task_name: str | None = None, + log_context: dict[str, str] = {}, **kwargs: Any, ) -> RuntimeFuture: """Submit `fn` as a task to the runtime.""" assert self._active_task is not None + + if task_name is not None and not isinstance(task_name, str): + raise RuntimeError('task_name must be a string.') + + if not isinstance(log_context, dict): + raise RuntimeError('log_context must be a dictionary.') + + for k, v in log_context.items(): + if not isinstance(k, str) or not isinstance(v, str): + raise RuntimeError( + 'log_context must be a map from strings to strings.', + ) + # Group fnargs together fnarg = (fn, args, kwargs) @@ -523,6 +547,8 @@ def submit( self._active_task.breadcrumbs + (self._active_task.return_address,), self._active_task.logging_level, self._active_task.max_logging_depth, + task_name, + self._active_task.log_context | log_context, ) # Submit the task (on the next cycle) @@ -535,10 +561,38 @@ def map( self, fn: Callable[..., Any], *args: Any, + task_name: Sequence[str | None] | str | None = None, + log_context: Sequence[dict[str, str]] | dict[str, str] = {}, 
**kwargs: Any, ) -> RuntimeFuture: """Map `fn` over the input arguments distributed across the runtime.""" assert self._active_task is not None + + if task_name is None or isinstance(task_name, str): + task_name = [task_name] * len(args[0]) + + if len(task_name) != len(args[0]): + raise RuntimeError( + 'task_name must be a string or a list of strings equal' + 'in length to the number of tasks.', + ) + + if isinstance(log_context, dict): + log_context = [log_context] * len(args[0]) + + if len(log_context) != len(args[0]): + raise RuntimeError( + 'log_context must be a dictionary or a list of dictionaries' + ' equal in length to the number of tasks.', + ) + + for context in log_context: + for k, v in context.items(): + if not isinstance(k, str) or not isinstance(v, str): + raise RuntimeError( + 'log_context must be a map from strings to strings.', + ) + # Group fnargs together fnargs = [] if len(args) == 1: @@ -568,6 +622,8 @@ def map( breadcrumbs, self._active_task.logging_level, self._active_task.max_logging_depth, + task_name[i], + self._active_task.log_context | log_context[i], ) for i, fnarg in enumerate(fnargs) ] From 264337cd297540d12d8389867bec002d11cfe3c1 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 12 Apr 2024 14:54:07 -0400 Subject: [PATCH 037/197] Fix python 3.8 dict update --- bqskit/runtime/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index dee3bdca7..be7dd07b9 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -548,7 +548,7 @@ def submit( self._active_task.logging_level, self._active_task.max_logging_depth, task_name, - self._active_task.log_context | log_context, + {**self._active_task.log_context, **log_context}, ) # Submit the task (on the next cycle) @@ -623,7 +623,7 @@ def map( self._active_task.logging_level, self._active_task.max_logging_depth, task_name[i], - self._active_task.log_context | log_context[i], + {**self._active_task.log_context, **log_context[i]}, ) for i, fnarg in enumerate(fnargs) ] From 2065a5f6792ca5e0d7eedf5241b1812b1aa39db4 Mon Sep 17 00:00:00 2001 From: alonkukl Date: Fri, 12 Apr 2024 22:18:15 -0700 Subject: [PATCH 038/197] Fixing the way we kill the worker on windows --- bqskit/runtime/worker.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index dee3bdca7..168a4adcb 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -249,7 +249,10 @@ def recv_incoming(self) -> None: msg, payload = self._conn.recv() except Exception: _logger.debug('Crashed due to lost connection') - os.kill(os.getpid(), signal.SIGKILL) + if sys.platform == 'win32': + os.kill(os.getpid(), 9) + else: + os.kill(os.getpid(), signal.SIGKILL) exit() _logger.debug(f'Received message {msg.name}.') @@ -257,7 +260,10 @@ def recv_incoming(self) -> None: # Process message if msg == RuntimeMessage.SHUTDOWN: - os.kill(os.getpid(), signal.SIGKILL) + if sys.platform == 'win32': + os.kill(os.getpid(), 9) + else: + os.kill(os.getpid(), signal.SIGKILL) elif msg == RuntimeMessage.SUBMIT: self.read_receipt_mutex.acquire() @@ -698,6 +704,7 @@ def start_worker( # Ignore interrupt signals on workers, boss will handle it for us # If w_id is None, then we are being spawned separately. 
signal.signal(signal.SIGINT, signal.SIG_IGN) + # TODO: check what needs to be done on win # Set number of BLAS threads set_blas_thread_counts(num_blas_threads) From 851ddf35f6cac816ec0ca64f1c8333e800f70698 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 13 Apr 2024 12:46:56 -0400 Subject: [PATCH 039/197] Fixed passes imports --- bqskit/compiler/__init__.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/__init__.py b/bqskit/compiler/__init__.py index f7048aa56..cd47fec74 100644 --- a/bqskit/compiler/__init__.py +++ b/bqskit/compiler/__init__.py @@ -36,9 +36,9 @@ WorkflowLike """ from __future__ import annotations +from typing import Any from bqskit.compiler.basepass import BasePass -from bqskit.compiler.compile import compile from bqskit.compiler.compiler import Compiler from bqskit.compiler.gateset import GateSet from bqskit.compiler.gateset import GateSetLike @@ -49,6 +49,16 @@ from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike +def __getattr__(name: str) -> Any: + # Lazy imports + if name == 'compile': + from bqskit.compiler.compile import compile + return compile + + # TODO: Move compile to a different subpackage and deprecate import + + raise AttributeError(f'module {__name__} has no attribute {name}') + __all__ = [ 'BasePass', 'compile', From 2a3b94140e9d9aa9d481185aee6dae0cd74015e4 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 13 Apr 2024 12:48:07 -0400 Subject: [PATCH 040/197] Implements #165 --- bqskit/passes/synthesis/pas.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bqskit/passes/synthesis/pas.py b/bqskit/passes/synthesis/pas.py index 2cad278d2..9a109f15a 100644 --- a/bqskit/passes/synthesis/pas.py +++ b/bqskit/passes/synthesis/pas.py @@ -113,6 +113,7 @@ async def synthesize( self.inner_synthesis.synthesize, targets, [data] * len(targets), + log_context=[{'perm': str(perm)} for perm in permsbyperms], ) # Return best circuit From 4682e098c6560fd48f2cc1ecc49f27d0b5dedbe7 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 13 Apr 2024 12:49:04 -0400 Subject: [PATCH 041/197] pre-commit --- bqskit/compiler/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bqskit/compiler/__init__.py b/bqskit/compiler/__init__.py index cd47fec74..c0ef1b3bf 100644 --- a/bqskit/compiler/__init__.py +++ b/bqskit/compiler/__init__.py @@ -36,6 +36,7 @@ WorkflowLike """ from __future__ import annotations + from typing import Any from bqskit.compiler.basepass import BasePass @@ -49,6 +50,7 @@ from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike + def __getattr__(name: str) -> Any: # Lazy imports if name == 'compile': @@ -59,6 +61,7 @@ def __getattr__(name: str) -> Any: raise AttributeError(f'module {__name__} has no attribute {name}') + __all__ = [ 'BasePass', 'compile', From 75d42f9315258542a29c9d6d2659b04129585330 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 15 Apr 2024 16:34:28 -0400 Subject: [PATCH 042/197] Small doc fix --- bqskit/compiler/compiler.py | 2 +- bqskit/ext/__init__.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 22fab9feb..8f58f3c35 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -293,7 +293,7 @@ def submit( tasks equal opportunity to log. Returns: - (uuid.UUID): The ID of the generated task in the system. This + uuid.UUID: The ID of the generated task in the system. 
This ID can be used to check the status of, cancel, and request the result of the task. """ diff --git a/bqskit/ext/__init__.py b/bqskit/ext/__init__.py index a6a4ee456..4cd607e74 100644 --- a/bqskit/ext/__init__.py +++ b/bqskit/ext/__init__.py @@ -53,6 +53,7 @@ """ from __future__ import annotations +# TODO: Deprecate imports from __init__, use lazy import to deprecate from bqskit.ext.cirq.models import Sycamore23Model from bqskit.ext.cirq.models import SycamoreModel from bqskit.ext.cirq.translate import bqskit_to_cirq From 95b06d299541f1b1eeaf083a282543a2ac91c650 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 16 Apr 2024 08:34:14 -0400 Subject: [PATCH 043/197] Add toggle for workers to print client log msgs --- bqskit/compiler/__init__.py | 1 + bqskit/ext/__init__.py | 2 +- bqskit/runtime/worker.py | 25 ++++++++++++++++++++++--- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/bqskit/compiler/__init__.py b/bqskit/compiler/__init__.py index c0ef1b3bf..fde8f87ee 100644 --- a/bqskit/compiler/__init__.py +++ b/bqskit/compiler/__init__.py @@ -54,6 +54,7 @@ def __getattr__(name: str) -> Any: # Lazy imports if name == 'compile': + # TODO: fix this (high-priority), overlap between module and function from bqskit.compiler.compile import compile return compile diff --git a/bqskit/ext/__init__.py b/bqskit/ext/__init__.py index 4cd607e74..131eec208 100644 --- a/bqskit/ext/__init__.py +++ b/bqskit/ext/__init__.py @@ -53,7 +53,6 @@ """ from __future__ import annotations -# TODO: Deprecate imports from __init__, use lazy import to deprecate from bqskit.ext.cirq.models import Sycamore23Model from bqskit.ext.cirq.models import SycamoreModel from bqskit.ext.cirq.translate import bqskit_to_cirq @@ -74,6 +73,7 @@ from bqskit.ext.supermarq import supermarq_liveness from bqskit.ext.supermarq import supermarq_parallelism from bqskit.ext.supermarq import supermarq_program_communication +# TODO: Deprecate imports from __init__, use lazy import to deprecate __all__ = [ diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 0738b02dd..ca7c87a4d 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -698,6 +698,7 @@ def start_worker( cpu: int | None = None, logging_level: int = logging.WARNING, num_blas_threads: int = 1, + log_client: bool = False, ) -> None: """Start this process's worker.""" if w_id is not None: @@ -710,7 +711,7 @@ def start_worker( set_blas_thread_counts(num_blas_threads) # Enforce no default logging - logging.lastResort = logging.NullHandler() # type: ignore # TODO: should I report this as a type bug? 
# noqa: E501 + logging.lastResort = logging.NullHandler() # type: ignore # typeshed#11770 logging.getLogger().handlers.clear() # Pin worker to cpu @@ -743,7 +744,10 @@ def start_worker( assert msg == RuntimeMessage.STARTED # Set up runtime logging - _runtime_logger = logging.getLogger('bqskit.runtime') + if not log_client: + _runtime_logger = logging.getLogger('bqskit.runtime') + else: + _runtime_logger = logging.getLogger() _runtime_logger.propagate = False _runtime_logger.setLevel(logging_level) _handler = logging.StreamHandler() @@ -809,6 +813,11 @@ def start_worker_rank() -> None: default=0, help='Enable logging of increasing verbosity, either -v, -vv, or -vvv.', ) + parser.add_argument( + '-l', '--log-client', + action='store_true', + help='Log messages from the client process.', + ) parser.add_argument( '-t', '--num_blas_threads', type=int, @@ -836,10 +845,20 @@ def start_worker_rank() -> None: logging_level = [30, 20, 10, 1][min(args.verbose, 3)] + if args.log_client and logging_level > 10: + raise RuntimeError('Cannot log client messages without at least -vv.') + # Spawn worker process procs = [] for cpu in cpus: - pargs = (None, args.port, cpu, logging_level, args.num_blas_threads) + pargs = ( + None, + args.port, + cpu, + logging_level, + args.num_blas_threads, + args.log_client, + ) procs.append(Process(target=start_worker, args=pargs)) procs[-1].start() From e5f67f6359c62d8aebf8d0755482b90a815994a2 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 16 Apr 2024 10:39:54 -0400 Subject: [PATCH 044/197] Added direction to runtime sent log --- bqskit/runtime/base.py | 8 +++++++- bqskit/runtime/detached.py | 7 +++++++ bqskit/runtime/manager.py | 7 +++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 1cb53c494..9f9dd2a27 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -387,7 +387,9 @@ def send_outgoing(self) -> None: continue outgoing[0].send((outgoing[1], outgoing[2])) - _logger.debug(f'Sent message {outgoing[1].name}.') + if _logger.isEnabledFor(logging.DEBUG): + to = self.get_to_string(outgoing[0]) + _logger.debug(f'Sent message {outgoing[1].name} to {to}.') if outgoing[1] == RuntimeMessage.SUBMIT_BATCH: _logger.log(1, f'[{outgoing[2][0]}] * {len(outgoing[2])}\n') @@ -471,6 +473,10 @@ def handle_system_error(self, error_str: str) -> None: RuntimeTask's coroutine code. 
""" + @abc.abstractmethod + def get_to_string(self, conn: Connection) -> str: + """Return a string representation of the connection.""" + def handle_shutdown(self) -> None: """Shutdown the node and release resources.""" # Stop running diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 230a5949d..6c0bcd51b 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -217,6 +217,13 @@ def handle_system_error(self, error_str: str) -> None: # Sleep to ensure clients receive error message before shutdown time.sleep(1) + def get_to_string(self, conn: Connection) -> str: + """Return a string representation of the connection.""" + if conn in self.clients: + return 'Client' + + return 'Employee' + def handle_shutdown(self) -> None: """Shutdown the runtime.""" super().handle_shutdown() diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 507cdf9a3..00bb4b2b7 100644 --- a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -228,6 +228,13 @@ def handle_system_error(self, error_str: str) -> None: # If server has crashed then just exit pass + def get_to_string(self, conn: Connection) -> str: + """Return a string representation of the connection.""" + if conn == self.upstream: + return 'Boss' + + return 'Employee' + def handle_shutdown(self) -> None: """Shutdown the manager and clean up spawned processes.""" super().handle_shutdown() From 104d35306de2afa8db25c0e51de5e73689a70d3d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 17 Apr 2024 07:54:48 -0400 Subject: [PATCH 045/197] Address comments and some clean up --- bqskit/runtime/base.py | 42 ++++++++++++++++++++++++++++++-------- bqskit/runtime/detached.py | 4 ++-- bqskit/runtime/manager.py | 6 ++---- bqskit/runtime/worker.py | 29 ++++++++++++++++++++------ 4 files changed, 60 insertions(+), 21 deletions(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index 9f9dd2a27..d10996e49 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -42,16 +42,28 @@ class RuntimeEmployee: def __init__( self, + id: int, conn: Connection, total_workers: int, process: Process | None = None, + is_manager: bool = False, ) -> None: """Construct an employee with all resources idle.""" + + self.id = id + """ + The ID of the employee. + + If this is a worker, then their unique worker id. If this is a manager, + then their local id. 
+ """ + self.conn: Connection = conn self.total_workers = total_workers self.process = process self.num_tasks = 0 self.num_idle_workers = total_workers + self.is_manager = is_manager self.submit_cache: list[tuple[RuntimeAddress, int]] = [] """ @@ -81,6 +93,11 @@ def shutdown(self) -> None: self.initiate_shutdown() self.complete_shutdown() + @property + def recipient_string(self) -> str: + """Return a string representation of the employee.""" + return f'{"Manager" if self.is_manager else "Worker"} {self.id}' + @property def has_idle_resources(self) -> bool: return self.num_idle_workers > 0 @@ -179,7 +196,14 @@ def connect_to_managers(self, ipports: Sequence[tuple[str, int]]) -> None: for i, conn in enumerate(manager_conns): msg, num_workers = conn.recv() assert msg == RuntimeMessage.STARTED - self.employees.append(RuntimeEmployee(conn, num_workers)) + self.employees.append( + RuntimeEmployee( + i, + conn, + num_workers, + is_manager=True, + ), + ) self.conn_to_employee_dict[conn] = self.employees[-1] self.sel.register( conn, @@ -286,7 +310,7 @@ def spawn_workers( for i, conn in enumerate(conns): msg, w_id = conn.recv() assert msg == RuntimeMessage.STARTED - employee = RuntimeEmployee(conn, 1, procs[w_id]) + employee = RuntimeEmployee(w_id, conn, 1, procs[w_id]) temp_reorder[w_id - self.lower_id_bound] = employee self.conn_to_employee_dict[conn] = employee @@ -295,13 +319,13 @@ def spawn_workers( self.employees.append(temp_reorder[i]) # Register employee communication - for i, employee in enumerate(self.employees): + for employee in self.employees: self.sel.register( employee.conn, selectors.EVENT_READ, MessageDirection.BELOW, ) - _logger.debug(f'Registered worker {i}.') + _logger.debug(f'Registered worker {employee.id}.') self.step_size = 1 self.total_workers = num_workers @@ -343,20 +367,20 @@ def connect_to_workers( for i, conn in enumerate(conns): w_id = self.lower_id_bound + i self.outgoing.put((conn, RuntimeMessage.STARTED, w_id)) - employee = RuntimeEmployee(conn, 1) + employee = RuntimeEmployee(w_id, conn, 1) self.employees.append(employee) self.conn_to_employee_dict[conn] = employee # Register employee communication - for i, employee in enumerate(self.employees): - w_id = self.lower_id_bound + i + for employee in self.employees: + w_id = employee.id assert employee.conn.recv() == (RuntimeMessage.STARTED, w_id) self.sel.register( employee.conn, selectors.EVENT_READ, MessageDirection.BELOW, ) - _logger.info(f'Registered worker {i}.') + _logger.info(f'Registered worker {w_id}.') self.step_size = 1 self.total_workers = num_workers @@ -583,7 +607,7 @@ def schedule_tasks(self, tasks: Sequence[RuntimeTask]) -> None: assignments, key=lambda x: x[0].num_idle_workers, reverse=True, - ) + ) # Employees with the most idle workers get assignments first for e, assignment in sorted_assignments: num_tasks = len(assignment) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 6c0bcd51b..90ad2b964 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -220,9 +220,9 @@ def handle_system_error(self, error_str: str) -> None: def get_to_string(self, conn: Connection) -> str: """Return a string representation of the connection.""" if conn in self.clients: - return 'Client' + return 'CLIENT' - return 'Employee' + return self.conn_to_employee_dict[conn].recipient_string def handle_shutdown(self) -> None: """Shutdown the runtime.""" diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 00bb4b2b7..0edd64f8f 100644 --- a/bqskit/runtime/manager.py 
+++ b/bqskit/runtime/manager.py @@ -183,12 +183,10 @@ def handle_message( if msg == RuntimeMessage.SUBMIT: rtask = cast(RuntimeTask, payload) self.send_up_or_schedule_tasks([rtask]) - # self.update_upstream_idle_workers() elif msg == RuntimeMessage.SUBMIT_BATCH: rtasks = cast(List[RuntimeTask], payload) self.send_up_or_schedule_tasks(rtasks) - # self.update_upstream_idle_workers() elif msg == RuntimeMessage.RESULT: result = cast(RuntimeResult, payload) @@ -231,9 +229,9 @@ def handle_system_error(self, error_str: str) -> None: def get_to_string(self, conn: Connection) -> str: """Return a string representation of the connection.""" if conn == self.upstream: - return 'Boss' + return 'BOSS' - return 'Employee' + return self.conn_to_employee_dict[conn].recipient_string def handle_shutdown(self) -> None: """Shutdown the manager and clean up spawned processes.""" diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index ca7c87a4d..82cf1ab73 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -164,7 +164,14 @@ def __init__(self, id: int, conn: Connection) -> None: """Tracks all started, unfinished tasks on this worker.""" self._delayed_tasks: list[RuntimeTask] = [] - """Store all delayed tasks in LIFO order.""" + """ + Store all delayed tasks in LIFO order. + + Delayed tasks have no context and are stored (more-or-less) as a + function pointer together with the arguments. When it gets started, it + consumes much more memory, so we delay the task start until necessary + (at no cost) + """ self._ready_task_ids: Queue[RuntimeAddress] = Queue() """Tasks queued up for execution.""" @@ -191,7 +198,13 @@ def __init__(self, id: int, conn: Connection) -> None: """Tracks the most recently processed submit message from above.""" self.read_receipt_mutex = Lock() - """A lock to ensure waiting messages's read receipt is correct.""" + """ + A lock to ensure waiting messages's read receipt is correct. + + This lock enforces atomic update of `most_recent_read_submit` and + task addition/enqueueing. This is necessary to ensure that the + idle status is always correct. + """ # Send out every client emitted log message upstream old_factory = logging.getLogRecordFactory() @@ -278,10 +291,6 @@ def recv_incoming(self) -> None: self.most_recent_read_submit = tasks[0].unique_id self._add_task(tasks.pop()) # Submit one task self._delayed_tasks.extend(tasks) # Delay rest - # Delayed tasks have no context and are stored (more-or-less) - # as a function pointer together with the arguments. - # When it gets started, it consumes much more memory, - # so we delay the task start until necessary (at no cost) self.read_receipt_mutex.release() elif msg == RuntimeMessage.RESULT: @@ -368,6 +377,12 @@ def _get_next_ready_task(self) -> RuntimeTask | None: self._add_task(self._delayed_tasks.pop()) continue + # Critical section + # Attempt to get a ready task. If none are available, message + # the manager/server with a waiting message letting them + # know the worker is idle. This needs to be atomic to prevent + # the self.more_recent_read_submit from being updated after + # catching the Empty exception, but before forming the payload. self.read_receipt_mutex.acquire() try: addr = self._ready_task_ids.get_nowait() @@ -376,6 +391,8 @@ def _get_next_ready_task(self) -> RuntimeTask | None: payload = (1, self.most_recent_read_submit) self._conn.send((RuntimeMessage.WAITING, payload)) self.read_receipt_mutex.release() + # Block for new message. 
Can release lock here since the + # the `self.most_recent_read_submit` has been used. addr = self._ready_task_ids.get() else: From ce64cabd7081888332195b9198c2ccd3306d8f75 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 19 Apr 2024 09:05:44 -0400 Subject: [PATCH 046/197] Some TODO cleanup --- bqskit/runtime/__init__.py | 6 ++---- bqskit/runtime/task.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bqskit/runtime/__init__.py b/bqskit/runtime/__init__.py index 371427ffd..75e3691c7 100644 --- a/bqskit/runtime/__init__.py +++ b/bqskit/runtime/__init__.py @@ -70,10 +70,8 @@ :class:`RuntimeHandle`, which you can use to submit, map, wait on, and cancel tasks in the execution environment. -For more information on how to design a custom pass, see this (TODO, sorry, -you can look at the source code of existing -`passes `_ -for a good example for the time being). +For more information on how to design a custom pass, see the following +guide: :doc:`guides/custompass.md`. .. autosummary:: :toctree: autogen diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index d74d79ec4..3d81f1bbd 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -30,7 +30,7 @@ class RuntimeTask: def __init__( self, - fnargs: tuple[Any, Any, Any], # TODO: Look into retyping this + fnargs: tuple[Any, Any, Any], return_address: RuntimeAddress, comp_task_id: int, breadcrumbs: tuple[RuntimeAddress, ...], From d5a7f97e433f54c0f6c8965aa84e679836d3ad81 Mon Sep 17 00:00:00 2001 From: ermalrrapaj Date: Wed, 31 Jan 2024 22:05:10 -0800 Subject: [PATCH 047/197] added power gate --- bqskit/ir/gates/composed/__init__.py | 2 + bqskit/ir/gates/composed/powergate.py | 123 ++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 bqskit/ir/gates/composed/powergate.py diff --git a/bqskit/ir/gates/composed/__init__.py b/bqskit/ir/gates/composed/__init__.py index d1f065f34..6893552b8 100644 --- a/bqskit/ir/gates/composed/__init__.py +++ b/bqskit/ir/gates/composed/__init__.py @@ -3,6 +3,7 @@ from bqskit.ir.gates.composed.controlled import ControlledGate from bqskit.ir.gates.composed.daggergate import DaggerGate +from bqskit.ir.gates.composed.powergate import PowerGate from bqskit.ir.gates.composed.embedded import EmbeddedGate from bqskit.ir.gates.composed.frozenparam import FrozenParameterGate from bqskit.ir.gates.composed.tagged import TaggedGate @@ -10,6 +11,7 @@ __all__ = [ 'ControlledGate', + 'PowerGate', 'DaggerGate', 'EmbeddedGate', 'FrozenParameterGate', diff --git a/bqskit/ir/gates/composed/powergate.py b/bqskit/ir/gates/composed/powergate.py new file mode 100644 index 000000000..e3c969755 --- /dev/null +++ b/bqskit/ir/gates/composed/powergate.py @@ -0,0 +1,123 @@ +"""This module implements the DaggerGate Class.""" +from __future__ import annotations + +import numpy as np +import numpy.typing as npt + +from bqskit.ir.gate import Gate +from bqskit.ir.gates.composedgate import ComposedGate +from bqskit.qis.unitary.differentiable import DifferentiableUnitary +from bqskit.qis.unitary.optimizable import LocallyOptimizableUnitary +from bqskit.qis.unitary.unitary import RealVector +from bqskit.utils.typing import is_integer +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.utils.docs import building_docs + + +class PowerGate( + ComposedGate, + LocallyOptimizableUnitary, + DifferentiableUnitary, +): + """ + An arbitrary inverted gate. + + The PowerGate is a composed gate that equivalent to the + integer power of the input gate. 
+ + For example: + >>> from bqskit.ir.gates import TGate, TdgGate + >>> PowerGate(TGate(),2).get_unitary() == TdgGate().get_unitary()*TdgGate().get_unitary() + True + """ + + def __init__(self, gate: Gate, power: int = 1) -> None: + """ + Create a gate which is the integer power of the input gate. + + Args: + gate (Gate): The Gate to conjugate transpose. + power (integer): The power index for the PowerGate + """ + if not isinstance(gate, Gate): + raise TypeError('Expected gate object, got %s' % type(gate)) + + if not is_integer(power): + raise TypeError( + f'Expected integer for num_controls, got {type(power)}.',) + + self.gate = gate + self.power =power + self._name = 'Power(%s)' % gate.name + self._num_params = gate.num_params + self._num_qudits = gate.num_qudits + self._radixes = gate.radixes + + # If input is a constant gate, we can cache the unitary. + if self.num_params == 0 and not building_docs(): + self.utry = np.linalg.matrix_power(gate.get_unitary(),self.power) + + def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: + """Return the unitary for this gate, see :class:`Unitary` for more.""" + if hasattr(self, 'utry'): + return self.utry + + return np.linalg.matrix_power(self.gate.get_unitary(params),power) + + def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: + """ + Return the gradient for this gate. + + See :class:`DifferentiableUnitary` for more info. + + Notes: + The derivative of the integer power of matrix is equal + to the derivative of the matrix multiplied by the integer-1 power of the matrix + and by the integer power. + """ + if hasattr(self, 'utry'): + return np.array([]) + + grads = self.gate.get_grad(params) # type: ignore + return self.power*np.linalg.matrix_power(self.gate.get_unitary(params),power-1)@grads + + + def get_unitary_and_grad( + self, + params: RealVector = [], + ) -> tuple[UnitaryMatrix, npt.NDArray[np.complex128]]: + """ + Return the unitary and gradient for this gate. + + See :class:`DifferentiableUnitary` for more info. + """ + if hasattr(self, 'utry'): + return self.utry, np.array([]) + + utry, grads = self.gate.get_unitary_and_grad(params) # type: ignore + return np.linalg.matrix_power(utry,power), self.power*np.linalg.matrix_power(utry,self.power-1)@grads + + + def optimize(self, env_matrix: npt.NDArray[np.complex128]) -> list[float]: #TODO fix + """ + Return the optimal parameters with respect to an environment matrix. + + See :class:`LocallyOptimizableUnitary` for more info. 
+ """ + if hasattr(self, 'utry'): + return [] + self.check_env_matrix(env_matrix) + return self.gate.optimize(env_matrix.conj().T) # type: ignore + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, PowerGate) + and self.gate == other.gate + ) + + def __hash__(self) -> int: + return hash(self.gate) + + def get_inverse(self) -> Gate: + """Return the gate's inverse as a gate.""" + return self.gate From e67b91096094ff44f94eceec80ddebb3ab0f7419 Mon Sep 17 00:00:00 2001 From: ermalrrapaj Date: Thu, 28 Mar 2024 10:52:07 -0700 Subject: [PATCH 048/197] fixed power gate and added tests --- bqskit/ir/gates/__init__.py | 1 + bqskit/ir/gates/composed/powergate.py | 101 ++++++++++++++++++++------ tests/ir/gates/composed/test_power.py | 80 ++++++++++++++++++++ 3 files changed, 158 insertions(+), 24 deletions(-) create mode 100644 tests/ir/gates/composed/test_power.py diff --git a/bqskit/ir/gates/__init__.py b/bqskit/ir/gates/__init__.py index 7505d0d88..c9bf26f41 100644 --- a/bqskit/ir/gates/__init__.py +++ b/bqskit/ir/gates/__init__.py @@ -97,6 +97,7 @@ :template: autosummary/gate.rst ControlledGate + PowerGate DaggerGate EmbeddedGate FrozenParameterGate diff --git a/bqskit/ir/gates/composed/powergate.py b/bqskit/ir/gates/composed/powergate.py index e3c969755..b7b821fca 100644 --- a/bqskit/ir/gates/composed/powergate.py +++ b/bqskit/ir/gates/composed/powergate.py @@ -3,20 +3,22 @@ import numpy as np import numpy.typing as npt +import re + from bqskit.ir.gate import Gate from bqskit.ir.gates.composedgate import ComposedGate +from bqskit.ir.gates.constant.unitary import ConstantUnitaryGate +from bqskit.ir.gates.constant.identity import IdentityGate from bqskit.qis.unitary.differentiable import DifferentiableUnitary -from bqskit.qis.unitary.optimizable import LocallyOptimizableUnitary +from bqskit.ir.gates.composed.daggergate import DaggerGate from bqskit.qis.unitary.unitary import RealVector from bqskit.utils.typing import is_integer from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix from bqskit.utils.docs import building_docs - class PowerGate( ComposedGate, - LocallyOptimizableUnitary, DifferentiableUnitary, ): """ @@ -52,17 +54,19 @@ def __init__(self, gate: Gate, power: int = 1) -> None: self._num_params = gate.num_params self._num_qudits = gate.num_qudits self._radixes = gate.radixes - + # If input is a constant gate, we can cache the unitary. 
if self.num_params == 0 and not building_docs(): - self.utry = np.linalg.matrix_power(gate.get_unitary(),self.power) + self.utry = np.linalg.matrix_power(self.gate.get_unitary(),self.power) def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the unitary for this gate, see :class:`Unitary` for more.""" if hasattr(self, 'utry'): return self.utry - - return np.linalg.matrix_power(self.gate.get_unitary(params),power) + + return np.linalg.matrix_power(self.gate.get_unitary(params),self.power) + + def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: """ @@ -78,8 +82,9 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: if hasattr(self, 'utry'): return np.array([]) - grads = self.gate.get_grad(params) # type: ignore - return self.power*np.linalg.matrix_power(self.gate.get_unitary(params),power-1)@grads + _, grad = self.get_unitary_and_grad(params) + return grad + def get_unitary_and_grad( @@ -93,22 +98,70 @@ def get_unitary_and_grad( """ if hasattr(self, 'utry'): return self.utry, np.array([]) - - utry, grads = self.gate.get_unitary_and_grad(params) # type: ignore - return np.linalg.matrix_power(utry,power), self.power*np.linalg.matrix_power(utry,self.power-1)@grads + + if self.power == 0: + return IdentityGate(radixes=self.gate.radixes).get_unitary(), 0*IdentityGate(radixes=self.gate.radixes).get_unitary() + + #powers = {0: IdentityGate(radixes=self.gate.radixes).get_unitary()} + #grads = {0: 0*IdentityGate(radixes=self.gate.radixes).get_unitary()} + + powers = {} + grads = {} + + # decompose the power as sum of powers of 2 + indexbin=bin(abs(self.power))[2:] + indices=[len(indexbin)-1-xb.start() for xb in re.finditer('1',indexbin)][::-1] + + powers[0], grads[0] = self.gate.get_unitary_and_grad(params) + + # avoid doing computations if not needed + if self.power==1: + return powers[0], grads[0] + + + + + + # check if the power is negative, and + if np.sign(self.power) == -1: + gate = DaggerGate(self.gate) + powers[0], grads[0] = gate.get_unitary_and_grad(params) + + # avoid doing computations if not needed + if abs(self.power)==1: + return powers[0], grads[0] + + + grads[1] = grads[0] @ powers[0] + powers[0] @ grads[0] + powers[1] = powers[0] @ powers[0] + + # avoid doing more computations if not needed + if abs(self.power)==2: + return powers[1], grads[1] + + # loop over powers of 2 + for i in range(2,indices[-1]+1): + powers[i] = powers[i-1] @ powers[i-1] + grads[i] = grads[i-1] @ powers[i-1] + powers[i-1] @ grads[i-1] + + + unitary = powers[indices[0]] + for i in indices[1:]: + unitary = unitary @ powers[indices[i]] + + grad = 0*IdentityGate(radixes=self.gate.radixes).get_unitary() + for i in indices: + grad_tmp = grads[i] + for j in indices: + if ji: + grad_tmp = grad_tmp @ powers[j] + grad = grad + grad_tmp + + return unitary, grad - def optimize(self, env_matrix: npt.NDArray[np.complex128]) -> list[float]: #TODO fix - """ - Return the optimal parameters with respect to an environment matrix. - - See :class:`LocallyOptimizableUnitary` for more info. 
- """ - if hasattr(self, 'utry'): - return [] - self.check_env_matrix(env_matrix) - return self.gate.optimize(env_matrix.conj().T) # type: ignore - def __eq__(self, other: object) -> bool: return ( isinstance(other, PowerGate) @@ -120,4 +173,4 @@ def __hash__(self) -> int: def get_inverse(self) -> Gate: """Return the gate's inverse as a gate.""" - return self.gate + return DaggerGate(self.gate) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py new file mode 100644 index 000000000..1c8e56b22 --- /dev/null +++ b/tests/ir/gates/composed/test_power.py @@ -0,0 +1,80 @@ +"""This module tests the PowerGate class.""" +from __future__ import annotations + +from bqskit.ir.gates import PowerGate, DaggerGate +from bqskit.ir.gates import RXGate, RYGate, RZGate +import numpy as np + + +test_power = lambda gate, power, params: np.linalg.matrix_power(gate.get_unitary([params]),power) + +def square_grad(gate, params): + g, gd = gate.get_unitary_and_grad([params]) + return g@gd+gd@g + +def third_power_grad(gate, params): + g, gd = gate.get_unitary_and_grad([params]) + return g @ square_grad(gate, params)+ gd @ test_power(gate, 2, params) + +def quartic_power_grad(gate, params): + g, gd = gate.get_unitary_and_grad([params]) + return g@third_power_grad(gate, params)+gd @ test_power(gate, 3, params) + +def power_gate_grads(gate, power, params): + if power == 2: + return square_grad(gate, params) + elif power == -2: + return square_grad(DaggerGate(gate), params) + elif power == 3: + return third_power_grad(gate, params) + elif power == -3: + return third_power_grad(DaggerGate(gate), params) + elif power == 4: + return quartic_power_grad(gate, power) + elif power == -4: + return quartic_power_grad(DaggerGate(gate), power) + +def test(test_gate, indices, params, error) -> None: + + # test index 1 + for param in params: + pgt, pgdt = test_gate.get_unitary_and_grad([param]) + pgate = PowerGate(test_gate, 1) + pg, pgd = pgate.get_unitary_and_grad([param]) + assert np.sum(abs(pg-pgt)) None: + global error, indices, parames + test(RXGate(), indices, params, error) + +def test_y() -> None: + global error, indices, parames + test(RYGate(), indices, params, error) + +def test_z() -> None: + global error, indices, parames + test(RZGate(), indices, params, error) \ No newline at end of file From 03ea7838cd42328d58b729d443acb142a64b8988 Mon Sep 17 00:00:00 2001 From: ermalrrapaj Date: Thu, 9 May 2024 09:13:24 -0700 Subject: [PATCH 049/197] precommit fixes --- bqskit/ir/gates/composed/__init__.py | 2 +- bqskit/ir/gates/composed/powergate.py | 117 +++++++++++++------------- tests/ir/gates/composed/test_power.py | 59 ++++++++----- 3 files changed, 95 insertions(+), 83 deletions(-) diff --git a/bqskit/ir/gates/composed/__init__.py b/bqskit/ir/gates/composed/__init__.py index 6893552b8..3eb5ac849 100644 --- a/bqskit/ir/gates/composed/__init__.py +++ b/bqskit/ir/gates/composed/__init__.py @@ -3,9 +3,9 @@ from bqskit.ir.gates.composed.controlled import ControlledGate from bqskit.ir.gates.composed.daggergate import DaggerGate -from bqskit.ir.gates.composed.powergate import PowerGate from bqskit.ir.gates.composed.embedded import EmbeddedGate from bqskit.ir.gates.composed.frozenparam import FrozenParameterGate +from bqskit.ir.gates.composed.powergate import PowerGate from bqskit.ir.gates.composed.tagged import TaggedGate from bqskit.ir.gates.composed.vlg import VariableLocationGate diff --git a/bqskit/ir/gates/composed/powergate.py b/bqskit/ir/gates/composed/powergate.py index 
b7b821fca..bb95adce1 100644 --- a/bqskit/ir/gates/composed/powergate.py +++ b/bqskit/ir/gates/composed/powergate.py @@ -1,21 +1,22 @@ """This module implements the DaggerGate Class.""" from __future__ import annotations -import numpy as np -import numpy.typing as npt import re +import numpy as np +import numpy.typing as npt from bqskit.ir.gate import Gate +from bqskit.ir.gates.composed.daggergate import DaggerGate from bqskit.ir.gates.composedgate import ComposedGate -from bqskit.ir.gates.constant.unitary import ConstantUnitaryGate from bqskit.ir.gates.constant.identity import IdentityGate +from bqskit.ir.gates.constant.unitary import ConstantUnitaryGate from bqskit.qis.unitary.differentiable import DifferentiableUnitary -from bqskit.ir.gates.composed.daggergate import DaggerGate from bqskit.qis.unitary.unitary import RealVector -from bqskit.utils.typing import is_integer from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix from bqskit.utils.docs import building_docs +from bqskit.utils.typing import is_integer + class PowerGate( ComposedGate, @@ -39,34 +40,35 @@ def __init__(self, gate: Gate, power: int = 1) -> None: Args: gate (Gate): The Gate to conjugate transpose. - power (integer): The power index for the PowerGate + power (integer): The power index for the PowerGate """ if not isinstance(gate, Gate): raise TypeError('Expected gate object, got %s' % type(gate)) - + if not is_integer(power): raise TypeError( - f'Expected integer for num_controls, got {type(power)}.',) - + f'Expected integer for num_controls, got {type(power)}.', + ) + self.gate = gate - self.power =power + self.power = power self._name = 'Power(%s)' % gate.name self._num_params = gate.num_params self._num_qudits = gate.num_qudits self._radixes = gate.radixes - + # If input is a constant gate, we can cache the unitary. 
if self.num_params == 0 and not building_docs(): - self.utry = np.linalg.matrix_power(self.gate.get_unitary(),self.power) + self.utry = np.linalg.matrix_power( + self.gate.get_unitary(), self.power, + ) def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the unitary for this gate, see :class:`Unitary` for more.""" if hasattr(self, 'utry'): return self.utry - - return np.linalg.matrix_power(self.gate.get_unitary(params),self.power) - - + + return np.linalg.matrix_power(self.gate.get_unitary(params), self.power) def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: """ @@ -82,10 +84,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: if hasattr(self, 'utry'): return np.array([]) - _, grad = self.get_unitary_and_grad(params) + _, grad = self.get_unitary_and_grad(params) return grad - - def get_unitary_and_grad( self, @@ -98,70 +98,67 @@ def get_unitary_and_grad( """ if hasattr(self, 'utry'): return self.utry, np.array([]) - + if self.power == 0: - return IdentityGate(radixes=self.gate.radixes).get_unitary(), 0*IdentityGate(radixes=self.gate.radixes).get_unitary() - - #powers = {0: IdentityGate(radixes=self.gate.radixes).get_unitary()} - #grads = {0: 0*IdentityGate(radixes=self.gate.radixes).get_unitary()} - + return IdentityGate(radixes=self.gate.radixes).get_unitary(), 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() + + # powers = {0: IdentityGate(radixes=self.gate.radixes).get_unitary()} + # grads = {0: 0*IdentityGate(radixes=self.gate.radixes).get_unitary()} + powers = {} grads = {} - + # decompose the power as sum of powers of 2 - indexbin=bin(abs(self.power))[2:] - indices=[len(indexbin)-1-xb.start() for xb in re.finditer('1',indexbin)][::-1] - - powers[0], grads[0] = self.gate.get_unitary_and_grad(params) - - # avoid doing computations if not needed - if self.power==1: + indexbin = bin(abs(self.power))[2:] + indices = [ + len(indexbin) - 1 - xb.start() + for xb in re.finditer('1', indexbin) + ][::-1] + + powers[0], grads[0] = self.gate.get_unitary_and_grad(params) + + # avoid doing computations if not needed + if self.power == 1: return powers[0], grads[0] - - - - - - # check if the power is negative, and + + # check if the power is negative, and if np.sign(self.power) == -1: gate = DaggerGate(self.gate) powers[0], grads[0] = gate.get_unitary_and_grad(params) - - # avoid doing computations if not needed - if abs(self.power)==1: + + # avoid doing computations if not needed + if abs(self.power) == 1: return powers[0], grads[0] - - + grads[1] = grads[0] @ powers[0] + powers[0] @ grads[0] powers[1] = powers[0] @ powers[0] - - # avoid doing more computations if not needed - if abs(self.power)==2: + + # avoid doing more computations if not needed + if abs(self.power) == 2: return powers[1], grads[1] - + # loop over powers of 2 - for i in range(2,indices[-1]+1): - powers[i] = powers[i-1] @ powers[i-1] - grads[i] = grads[i-1] @ powers[i-1] + powers[i-1] @ grads[i-1] - - + for i in range(2, indices[-1] + 1): + powers[i] = powers[i - 1] @ powers[i - 1] + grads[i] = grads[i - 1] @ powers[i - 1] + \ + powers[i - 1] @ grads[i - 1] + unitary = powers[indices[0]] for i in indices[1:]: unitary = unitary @ powers[indices[i]] - - grad = 0*IdentityGate(radixes=self.gate.radixes).get_unitary() + + grad = 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() for i in indices: grad_tmp = grads[i] for j in indices: - if ji: + elif j > i: grad_tmp = grad_tmp @ powers[j] grad = grad + grad_tmp - + return unitary, grad - - + 
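
# A minimal, self-contained sketch (not part of the patch) of the binary
# squaring recurrence used in get_unitary_and_grad above: if P = U^(2^(k-1))
# has gradient dP, then d(P @ P) = dP @ P + P @ dP, and each set bit of the
# exponent contributes one product-rule term. The names rx, drx, and
# power_and_grad are illustrative only and are not BQSKit API.
import numpy as np

def rx(t: float) -> np.ndarray:
    """Example parameterized unitary: a single-qubit X rotation."""
    c, s = np.cos(t / 2), np.sin(t / 2)
    return np.array([[c, -1j * s], [-1j * s, c]])

def drx(t: float) -> np.ndarray:
    """Analytic derivative of rx with respect to t."""
    c, s = np.cos(t / 2), np.sin(t / 2)
    return 0.5 * np.array([[-s, -1j * c], [-1j * c, -s]])

def power_and_grad(u: np.ndarray, du: np.ndarray, n: int):
    """Compute (u**n, d(u**n)) with the same repeated-squaring idea."""
    result = np.eye(u.shape[0], dtype=complex)
    grad = np.zeros_like(u)
    while n:
        if n & 1:                      # this power of two enters the product
            grad = grad @ u + result @ du
            result = result @ u
        du = du @ u + u @ du           # gradient of the squared factor
        u = u @ u                      # square the factor itself
        n >>= 1
    return result, grad

t = 0.37
u5, du5 = power_and_grad(rx(t), drx(t), 5)
fd = (
    np.linalg.matrix_power(rx(t + 1e-6), 5)
    - np.linalg.matrix_power(rx(t - 1e-6), 5)
) / 2e-6
assert np.allclose(u5, np.linalg.matrix_power(rx(t), 5))
assert np.allclose(du5, fd, atol=1e-6)
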
def __eq__(self, other: object) -> bool: return ( isinstance(other, PowerGate) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index 1c8e56b22..adccd04da 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -1,24 +1,34 @@ """This module tests the PowerGate class.""" from __future__ import annotations -from bqskit.ir.gates import PowerGate, DaggerGate -from bqskit.ir.gates import RXGate, RYGate, RZGate import numpy as np +from bqskit.ir.gates import DaggerGate +from bqskit.ir.gates import PowerGate +from bqskit.ir.gates import RXGate +from bqskit.ir.gates import RYGate +from bqskit.ir.gates import RZGate + + +test_power = lambda gate, power, params: np.linalg.matrix_power( + gate.get_unitary([params]), power, +) -test_power = lambda gate, power, params: np.linalg.matrix_power(gate.get_unitary([params]),power) def square_grad(gate, params): g, gd = gate.get_unitary_and_grad([params]) - return g@gd+gd@g + return g @ gd + gd @ g + def third_power_grad(gate, params): g, gd = gate.get_unitary_and_grad([params]) - return g @ square_grad(gate, params)+ gd @ test_power(gate, 2, params) + return g @ square_grad(gate, params) + gd @ test_power(gate, 2, params) + def quartic_power_grad(gate, params): g, gd = gate.get_unitary_and_grad([params]) - return g@third_power_grad(gate, params)+gd @ test_power(gate, 3, params) + return g @ third_power_grad(gate, params) + gd @ test_power(gate, 3, params) + def power_gate_grads(gate, power, params): if power == 2: @@ -34,47 +44,52 @@ def power_gate_grads(gate, power, params): elif power == -4: return quartic_power_grad(DaggerGate(gate), power) -def test(test_gate, indices, params, error) -> None: - + +def test(test_gate, indices, params, error) -> None: + # test index 1 for param in params: pgt, pgdt = test_gate.get_unitary_and_grad([param]) pgate = PowerGate(test_gate, 1) pg, pgd = pgate.get_unitary_and_grad([param]) - assert np.sum(abs(pg-pgt)) None: global error, indices, parames test(RXGate(), indices, params, error) - + + def test_y() -> None: global error, indices, parames test(RYGate(), indices, params, error) - + + def test_z() -> None: global error, indices, parames - test(RZGate(), indices, params, error) \ No newline at end of file + test(RZGate(), indices, params, error) From a16c369db342891934a6a5d36fde3850c2d663cb Mon Sep 17 00:00:00 2001 From: ermalrrapaj Date: Thu, 9 May 2024 09:41:38 -0700 Subject: [PATCH 050/197] fixed precommit --- bqskit/ir/gates/composed/powergate.py | 34 ++++++++++++++++++++------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/bqskit/ir/gates/composed/powergate.py b/bqskit/ir/gates/composed/powergate.py index bb95adce1..2ee5866df 100644 --- a/bqskit/ir/gates/composed/powergate.py +++ b/bqskit/ir/gates/composed/powergate.py @@ -10,7 +10,6 @@ from bqskit.ir.gates.composed.daggergate import DaggerGate from bqskit.ir.gates.composedgate import ComposedGate from bqskit.ir.gates.constant.identity import IdentityGate -from bqskit.ir.gates.constant.unitary import ConstantUnitaryGate from bqskit.qis.unitary.differentiable import DifferentiableUnitary from bqskit.qis.unitary.unitary import RealVector from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix @@ -30,7 +29,8 @@ class PowerGate( For example: >>> from bqskit.ir.gates import TGate, TdgGate - >>> PowerGate(TGate(),2).get_unitary() == TdgGate().get_unitary()*TdgGate().get_unitary() + >>> PowerGate(TGate(),2).get_unitary() == + 
TdgGate().get_unitary()*TdgGate().get_unitary() True """ @@ -59,8 +59,10 @@ def __init__(self, gate: Gate, power: int = 1) -> None: # If input is a constant gate, we can cache the unitary. if self.num_params == 0 and not building_docs(): - self.utry = np.linalg.matrix_power( - self.gate.get_unitary(), self.power, + self.utry = UnitaryMatrix( + np.linalg.matrix_power( + self.gate.get_unitary(), self.power, + ), self._radixes, ) def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: @@ -68,9 +70,18 @@ def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: if hasattr(self, 'utry'): return self.utry - return np.linalg.matrix_power(self.gate.get_unitary(params), self.power) + return UnitaryMatrix( + np.linalg.matrix_power( + self.gate.get_unitary(params), + self.power, + ), + self._radixes, + ) - def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: + def get_grad( + self, + params: RealVector = [], + ) -> npt.NDArray[np.complex128]: """ Return the gradient for this gate. @@ -78,7 +89,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: Notes: The derivative of the integer power of matrix is equal - to the derivative of the matrix multiplied by the integer-1 power of the matrix + to the derivative of the matrix multiplied by + the integer-1 power of the matrix and by the integer power. """ if hasattr(self, 'utry'): @@ -100,7 +112,9 @@ def get_unitary_and_grad( return self.utry, np.array([]) if self.power == 0: - return IdentityGate(radixes=self.gate.radixes).get_unitary(), 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() + ID = IdentityGate(radixes=self.gate.radixes).get_unitary() + ID_GRAD = 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() + return ID, ID_GRAD # powers = {0: IdentityGate(radixes=self.gate.radixes).get_unitary()} # grads = {0: 0*IdentityGate(radixes=self.gate.radixes).get_unitary()} @@ -115,8 +129,10 @@ def get_unitary_and_grad( for xb in re.finditer('1', indexbin) ][::-1] - powers[0], grads[0] = self.gate.get_unitary_and_grad(params) + powers[0], grads[0] = self.gate.get_unitary_and_grad( # type: ignore + params, + ) # avoid doing computations if not needed if self.power == 1: return powers[0], grads[0] From 519c15824321b4c3bf1eae89a5aea94268844fe3 Mon Sep 17 00:00:00 2001 From: ermalrrapaj Date: Thu, 9 May 2024 09:43:59 -0700 Subject: [PATCH 051/197] fixed test --- tests/ir/gates/composed/test_power.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index adccd04da..6e3e9b1fe 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -1,3 +1,4 @@ +# type: ignore """This module tests the PowerGate class.""" from __future__ import annotations From dbd637d4ebf70fd605724875e9b1f68d0e734fa2 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Wed, 15 May 2024 12:17:07 -0700 Subject: [PATCH 052/197] Resolving Comments --- bqskit/passes/processing/treescan.py | 61 +++++++++++++++++++++------- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index 0fe0063ef..dbcd53a7b 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -12,7 +12,7 @@ from bqskit.ir.opt.cost.functions import HilbertSchmidtResidualsGenerator from bqskit.ir.opt.cost.generator import CostFunctionGenerator from bqskit.runtime import get_runtime -from bqskit.utils.typing import 
is_real_number +from bqskit.utils.typing import is_real_number, is_integer _logger = logging.getLogger(__name__) @@ -21,7 +21,20 @@ class TreeScanningGateRemovalPass(BasePass): """ The ScanningGateRemovalPass class. - Starting from one side of the circuit, attempt to remove gates one-by-one. + Starting from one side of the circuit, run the following: + + Split the circuit operations into chunks of size tree_depth + At every iteration: + a. Look at the next chunk of operations + b. Generate 2 ^ tree_depth circuits. Each circuit corresponds to every combination of whether or not to include one of the operations in the chunk. + c. Instantiate in parallel all 2^tree_depth circuits + d. Choose the circuit that has the least number of operations and move on to the next chunk of operations. + + This optimization is less greedy than the current ScanningGate removal, which we see can offer + much better quality circuits than ScanningGate. In very rare occasions, ScanningGate may + be able to outperform TreeScan (since it is still greedy), but in general we can expect + TreeScan to almost always outperform ScanningGate. + """ def __init__( @@ -56,7 +69,9 @@ def __init__( tree_depth (int): The depth of the tree of potential solutions to instantiate. Note that 2^(tree_depth) - 1 - circuits will be instantiated in parallel. + circuits will be instantiated in parallel. Note that the default + behavior will be equivalent to normal ScanningGateRemoval + (Default: 1) collection_filter (Callable[[Operation], bool] | None): A predicate that determines which operations should be @@ -92,23 +107,34 @@ def __init__( ' collection_filter, got %s.' % type(self.collection_filter), ) + if not is_integer(tree_depth): + raise TypeError( + 'Expected Integer type for tree_depth, got %s.' + % type(instantiate_options), + ) + self.tree_depth = tree_depth self.start_from_left = start_from_left self.success_threshold = success_threshold self.cost = cost self.instantiate_options: dict[str, Any] = { 'dist_tol': self.success_threshold, - 'min_iters': 100, + 'min_iters': 10, 'cost_fn_gen': self.cost, } self.instantiate_options.update(instantiate_options) @staticmethod def get_tree_circs( - orig_num_cycles: int, circuit_copy: Circuit, + orig_num_cycles: int, + circuit_copy: Circuit, cycle_and_ops: list[tuple[int, Operation]], ) -> list[Circuit]: - # Implement recursively for now, if slow then fix + ''' + Given a circuit, create 2^(tree_depth) - 1 circuits that remove up + to tree_depth operations. The circuits are sorted by the number of + operations removed. 
+ ''' all_circs = [circuit_copy.copy()] for cycle, op in cycle_and_ops: new_circs = [] @@ -123,8 +149,8 @@ def get_tree_circs( all_circs = new_circs all_circs = sorted(all_circs, key=lambda x: x.num_operations) - - return all_circs + # Remove circuit with no gates deleted + return all_circs[:-1] async def run(self, circuit: Circuit, data: PassData) -> None: """Perform the pass's operation, see :class:`BasePass` for more.""" @@ -156,15 +182,13 @@ async def run(self, circuit: Circuit, data: PassData) -> None: all_circs = TreeScanningGateRemovalPass.get_tree_circs( circuit.num_cycles, circuit_copy, chunk, ) - # Remove circuit with no gates deleted - all_circs = all_circs[:-1] _logger.debug( - 'Attempting removal of operation of' + 'Attempting removal of operation of up to' f' {self.tree_depth} operations.', ) - instantiated_circuits = await get_runtime().map( + instantiated_circuits: list[Circuit] = await get_runtime().map( Circuit.instantiate, all_circs, target=target, @@ -176,9 +200,18 @@ async def run(self, circuit: Circuit, data: PassData) -> None: # Pick least count with least dist for i, dist in enumerate(dists): if dist < self.success_threshold: + # Log gates removed + gate_dict_orig = circuit_copy.gate_counts + gate_dict_new = instantiated_circuits[i].gate_counts + gates_removed = { + k: circuit_copy.gate_counts[k] - gate_dict_new.get(k, 0) + for k in gate_dict_orig.keys() + } + gates_removed = { + k: v for k, v in gates_removed.items() if v != 0 + } _logger.debug( - f'Successfully switched to circuit {i}' - ' of {2 ** self.tree_depth}.', + f'Successfully removed {gates_removed} gates', ) circuit_copy = instantiated_circuits[i] break From 546e736e44dd880642b6b4f870e8e864f6d4b888 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Wed, 15 May 2024 12:26:46 -0700 Subject: [PATCH 053/197] tox fix --- bqskit/passes/processing/treescan.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index dbcd53a7b..230596196 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -12,7 +12,8 @@ from bqskit.ir.opt.cost.functions import HilbertSchmidtResidualsGenerator from bqskit.ir.opt.cost.generator import CostFunctionGenerator from bqskit.runtime import get_runtime -from bqskit.utils.typing import is_real_number, is_integer +from bqskit.utils.typing import is_integer +from bqskit.utils.typing import is_real_number _logger = logging.getLogger(__name__) @@ -26,15 +27,17 @@ class TreeScanningGateRemovalPass(BasePass): Split the circuit operations into chunks of size tree_depth At every iteration: a. Look at the next chunk of operations - b. Generate 2 ^ tree_depth circuits. Each circuit corresponds to every combination of whether or not to include one of the operations in the chunk. + b. Generate 2 ^ tree_depth circuits. Each circuit corresponds to every + combination of whether or not to include one of the operations in the chunk. c. Instantiate in parallel all 2^tree_depth circuits - d. Choose the circuit that has the least number of operations and move on to the next chunk of operations. + d. Choose the circuit that has the least number of operations and move + on to the next chunk of operations. - This optimization is less greedy than the current ScanningGate removal, which we see can offer - much better quality circuits than ScanningGate. 
In very rare occasions, ScanningGate may
-    be able to outperform TreeScan (since it is still greedy), but in general we can expect
+    This optimization is less greedy than the current ScanningGate removal,
+    which we see can offer much better quality circuits than ScanningGate.
+    In very rare occasions, ScanningGate may be able to outperform
+    TreeScan (since it is still greedy), but in general we can expect
     TreeScan to almost always outperform ScanningGate.
-
     """
 
     def __init__(
@@ -112,7 +115,7 @@ def __init__(
                 'Expected Integer type for tree_depth, got %s.'
                 % type(instantiate_options),
            )
-
+
         self.tree_depth = tree_depth
         self.start_from_left = start_from_left
         self.success_threshold = success_threshold
@@ -126,7 +129,7 @@ def __init__(
 
     @staticmethod
     def get_tree_circs(
-        orig_num_cycles: int, 
+        orig_num_cycles: int,
         circuit_copy: Circuit,
         cycle_and_ops: list[tuple[int, Operation]],
     ) -> list[Circuit]:

From e3c5e3a19a1433c38044228eec5ef132adf7081f Mon Sep 17 00:00:00 2001
From: Justin Kalloor
Date: Wed, 15 May 2024 12:29:56 -0700
Subject: [PATCH 054/197] Adding Doc to init

---
 bqskit/passes/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py
index f2581e8f9..08ae0429c 100644
--- a/bqskit/passes/__init__.py
+++ b/bqskit/passes/__init__.py
@@ -43,6 +43,7 @@
     ExhaustiveGateRemovalPass
     IterativeScanningGateRemovalPass
     ScanningGateRemovalPass
+    TreeScanningGateRemovalPass
     SubstitutePass
 
 .. rubric:: Retargeting Passes

From 2a5ab3807b4f4ba7a18e3b36a477fad6f1d30815 Mon Sep 17 00:00:00 2001
From: Mathias Weiden
Date: Fri, 28 Jun 2024 13:53:42 -0700
Subject: [PATCH 055/197] Pauli Z Matrices for generating diagonal unitaries

---
 bqskit/qis/pauliz.py    | 251 +++++++++++++++++++++
 tests/qis/test_pauli.py | 470 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 721 insertions(+)
 create mode 100644 bqskit/qis/pauliz.py

diff --git a/bqskit/qis/pauliz.py b/bqskit/qis/pauliz.py
new file mode 100644
index 000000000..0519221d9
--- /dev/null
+++ b/bqskit/qis/pauliz.py
@@ -0,0 +1,251 @@
+"""This module implements the PauliZMatrices class."""
+from __future__ import annotations
+
+import itertools as it
+from typing import Iterable
+from typing import Iterator
+from typing import overload
+from typing import Sequence
+from typing import TYPE_CHECKING
+
+import numpy as np
+import numpy.typing as npt
+
+from bqskit.utils.typing import is_integer
+from bqskit.utils.typing import is_numeric
+from bqskit.utils.typing import is_sequence
+
+if TYPE_CHECKING:
+    from bqskit.qis.unitary.unitary import RealVector
+
+
+class PauliZMatrices(Sequence[npt.NDArray[np.complex128]]):
+    """
+    The group of Pauli Z matrices.
+
+    A PauliZMatrices object represents the entire of set of Pauli Z matrices
+    for some number of qubits.
+    """
+
+    Z = np.array(
+        [
+            [1, 0],
+            [0, -1],
+        ], dtype=np.complex128,
+    )
+    """The Pauli Z Matrix."""
+
+    I = np.array(
+        [
+            [1, 0],
+            [0, 1],
+        ], dtype=np.complex128,
+    )
+    """The Identity Matrix."""
+
+    def __init__(self, num_qudits: int) -> None:
+        """
+        Construct the Pauli Z group for `num_qudits` number of qubits.
+
+        Args:
+            num_qudits (int): Power of the tensor product of the Pauli Z
+                group.
+
+        Raises:
+            ValueError: If `num_qudits` is less than or equal to 0.
+        """
+
+        if not is_integer(num_qudits):
+            raise TypeError(
+                'Expected integer for num_qudits, got %s.' %
+                type(num_qudits),
+            )
+
+        if num_qudits <= 0:
+            raise ValueError(
+                'Expected positive integer for num_qudits, got %s.' 
% type(
+                    num_qudits,
+                ),
+            )
+
+        self.num_qudits = num_qudits
+
+        if num_qudits == 1:
+            self.paulizs = [
+                PauliZMatrices.I,
+                PauliZMatrices.Z,
+            ]
+        else:
+            self.paulizs = []
+            matrices = it.product(
+                PauliZMatrices(
+                    num_qudits - 1,
+                ),
+                PauliZMatrices(1),
+            )
+            for pauliz_n_1, pauliz_1 in matrices:
+                self.paulizs.append(np.kron(pauliz_n_1, pauliz_1))
+
+    def __iter__(self) -> Iterator[npt.NDArray[np.complex128]]:
+        return self.paulizs.__iter__()
+
+    @overload
+    def __getitem__(self, index: int) -> npt.NDArray[np.complex128]:
+        ...
+
+    @overload
+    def __getitem__(self, index: slice) -> list[npt.NDArray[np.complex128]]:
+        ...
+
+    def __getitem__(
+        self,
+        index: int | slice,
+    ) -> npt.NDArray[np.complex128] | list[npt.NDArray[np.complex128]]:
+        return self.paulizs[index]
+
+    def __len__(self) -> int:
+        return len(self.paulizs)
+
+    @property
+    def numpy(self) -> npt.NDArray[np.complex128]:
+        """The NumPy array holding the pauliz matrices."""
+        return np.array(self.paulizs)
+
+    def __array__(
+        self,
+        dtype: np.typing.DTypeLike = np.complex128,
+    ) -> npt.NDArray[np.complex128]:
+        """Implements NumPy API for the PauliZMatrices class."""
+        if dtype != np.complex128:
+            raise ValueError('PauliZMatrices only supports Complex128 dtype.')
+
+        return np.array(self.paulizs, dtype)
+
+    def get_projection_matrices(
+        self, q_set: Iterable[int],
+    ) -> list[npt.NDArray[np.complex128]]:
+        """
+        Return the Pauli Z matrices that act only on qubits in `q_set`.
+
+        Args:
+            q_set (Iterable[int]): Active qubit indices
+
+        Returns:
+            list[np.ndarray]: Pauli Z matrices from `self` acting only
+                on qubits in `q_set`.
+
+        Raises:
+            ValueError: if `q_set` is an invalid set of qubit indices.
+        """
+        q_set = list(q_set)
+
+        if not all(is_integer(q) for q in q_set):
+            raise TypeError('Expected sequence of integers for qubit indices.')
+
+        if any(q < 0 or q >= self.num_qudits for q in q_set):
+            raise ValueError('Qubit indices must be in [0, n).')
+
+        if len(q_set) != len(set(q_set)):
+            raise ValueError('Qubit indices cannot have duplicates.')
+
+        # An nth-order Pauli Z string can be read as a base 2 number with
+        # I = 0, Z = 1
+        # ZZI = 1 * 2^2 + 1 * 2^1 + 0 * 2^0 = 6 (base 10)
+        # This gives the idx of ZZI in paulizs
+        # Note we read qubit index from the left,
+        # so Z in ZII corresponds to q = 0
+        pauliz_n_qubit = []
+        for ps in it.product([0, 1], repeat=len(q_set)):
+            idx = 0
+            for p, q in zip(ps, q_set):
+                idx += p * (2 ** (self.num_qudits - q - 1))
+            pauliz_n_qubit.append(self.paulizs[idx])
+
+        return pauliz_n_qubit
+
+    def dot_product(self, alpha: RealVector) -> npt.NDArray[np.complex128]:
+        """
+        Computes the standard dot product of `alpha` with the Pauli Z matrices.
+
+        Args:
+            alpha (RealVector): The Pauli Z coefficients.
+
+        Returns:
+            np.ndarray: Sum of element-wise multiplication of `alpha`
+                and `self.paulizs`.
+
+        Raises:
+            ValueError: If `alpha` and `self.paulizs` are incompatible.
+        """
+
+        if not is_sequence(alpha) or not all(is_numeric(a) for a in alpha):
+            msg = f'Expected a sequence of numbers, got {type(alpha)}.'
+            raise TypeError(msg)
+
+        if len(alpha) != len(self):
+            msg = (
+                'Incorrect number of alpha values, expected '
+                f'{len(self)}, got {len(alpha)}.'
+            )
+            raise ValueError(msg)
+
+        return np.array(np.sum([a * s for a, s in zip(alpha, self.paulizs)], 0))
+
+    @staticmethod
+    def from_string(
+        pauliz_string: str,
+    ) -> npt.NDArray[np.complex128] | list[npt.NDArray[np.complex128]]:
+        """
+        Construct Pauli Z matrices from a string description.
+ + Args: + pauli_string (str): A string that describes the desired matrices. + This is a comma-seperated list of pauli strings. + A pauli string has the following regex pattern: [IZ]+ + + Returns: + np.ndarray | list[np.ndarray]: Either the single pauli Z matrix + if only one is constructed, or the list of the constructed + pauli Z matrices. + + Raises: + ValueError: if `pauliz_string` is invalid. + """ + + if not isinstance(pauliz_string, str): + msg = f'Expected str for pauliz_string, got {type(pauliz_string)}.' + raise TypeError(msg) + + pauliz_strings = [ + string.strip().upper() + for string in pauliz_string.split(',') + if len(string.strip()) > 0 + ] + + pauliz_matrices = [] + idx_dict = {'I': 0, 'Z': 1} + mat_dict = { + 'I': PauliZMatrices.I, + 'Z': PauliZMatrices.Z, + } + + for pauli_string in pauliz_strings: + if not all(char in 'IZ' for char in pauli_string): + raise ValueError('Invalid Pauli Z string.') + + if len(pauli_string) <= 6: + idx = 0 + for char in pauli_string: + idx *= 2 + idx += idx_dict[char] + pauliz_matrices.append(PauliZMatrices(len(pauli_string))[idx]) + else: + acm = mat_dict[pauli_string[0]] + for char in pauli_string[1:]: + acm = np.kron(acm, mat_dict[char]) + pauliz_matrices.append(acm) + + if len(pauliz_matrices) == 1: + return pauliz_matrices[0] + + return pauliz_matrices diff --git a/tests/qis/test_pauli.py b/tests/qis/test_pauli.py index 8e8c3ef4f..261566821 100644 --- a/tests/qis/test_pauli.py +++ b/tests/qis/test_pauli.py @@ -10,6 +10,7 @@ from hypothesis.strategies import integers from bqskit.qis.pauli import PauliMatrices +from bqskit.qis.pauliz import PauliZMatrices from bqskit.qis.unitary.unitary import RealVector from bqskit.utils.test.types import invalid_type_test from bqskit.utils.test.types import valid_type_test @@ -800,3 +801,472 @@ def test_multi( assert all(isinstance(pauli, np.ndarray) for pauli in paulis) assert len(paulis) == len(pauli_mats) assert all(self.in_array(pauli, pauli_mats) for pauli in paulis) + + +class TestPauliZMatricesConstructor: + def in_array(self, needle: Any, haystack: Any) -> bool: + for elem in haystack: + if np.allclose(elem, needle): + return True + + return False + + @invalid_type_test(PauliZMatrices) + def test_invalid_type(self) -> None: + pass + + @given(integers(max_value=-1)) + def test_invalid_value(self, size: int) -> None: + with pytest.raises(ValueError): + PauliZMatrices(size) + + def test_size_1(self) -> None: + num_qubits = 1 + paulis = PauliZMatrices(num_qubits) + assert len(paulis) == 2 ** num_qubits + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(I, paulis) + assert self.in_array(Z, paulis) + + def test_size_2(self) -> None: + num_qubits = 2 + paulis = PauliZMatrices(num_qubits) + assert len(paulis) == 2 ** num_qubits + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(Z, Z), paulis) + assert self.in_array(np.kron(Z, I), paulis) + assert self.in_array(np.kron(I, Z), paulis) + assert self.in_array(np.kron(I, I), paulis) + + def test_size_3(self) -> None: + num_qubits = 3 + paulis = PauliZMatrices(num_qubits) + assert len(paulis) == 2 ** num_qubits + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(Z, np.kron(Z, Z)), paulis) + assert self.in_array(np.kron(Z, np.kron(Z, I)), paulis) + assert self.in_array(np.kron(Z, np.kron(I, 
Z)), paulis) + assert self.in_array(np.kron(Z, np.kron(I, I)), paulis) + assert self.in_array(np.kron(I, np.kron(Z, Z)), paulis) + assert self.in_array(np.kron(I, np.kron(Z, I)), paulis) + assert self.in_array(np.kron(I, np.kron(I, Z)), paulis) + assert self.in_array(np.kron(I, np.kron(I, I)), paulis) + + +class TestPauliZMatricesGetProjectionMatrices: + def in_array(self, needle: Any, haystack: Any) -> bool: + for elem in haystack: + if np.allclose(elem, needle): + return True + + return False + + @valid_type_test(PauliZMatrices(1).get_projection_matrices) + def test_valid_type(self) -> None: + pass + + @invalid_type_test(PauliZMatrices(1).get_projection_matrices) + def test_invalid_type(self) -> None: + pass + + @pytest.mark.parametrize('invalid_qubit', [-5, -2, 4, 10]) + def test_invalid_value_1(self, invalid_qubit: int) -> None: + paulis = PauliZMatrices(4) + with pytest.raises(ValueError): + paulis.get_projection_matrices([invalid_qubit]) + + @pytest.mark.parametrize('invalid_q_set', [[0, 0], [0, 1, 2, 4]]) + def test_invalid_value_2(self, invalid_q_set: list[int]) -> None: + paulis = PauliZMatrices(4) + with pytest.raises(ValueError): + paulis.get_projection_matrices(invalid_q_set) + + def test_proj_3_0(self) -> None: + num_qubits = 3 + qubit_proj = 0 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(Z, I), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + + def test_proj_3_1(self) -> None: + num_qubits = 3 + qubit_proj = 1 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(I, Z), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + + def test_proj_3_2(self) -> None: + num_qubits = 3 + qubit_proj = 2 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(I, I), Z), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + + def test_proj_4_0(self) -> None: + num_qubits = 4 + qubit_proj = 0 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(np.kron(Z, I), I), I), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), I), projs) + + def test_proj_4_1(self) -> None: + num_qubits = 4 + qubit_proj = 1 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(np.kron(I, Z), I), I), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), I), projs) + + def test_proj_4_2(self) -> None: + num_qubits = 4 + qubit_proj = 2 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 
1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(np.kron(I, I), Z), I), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), I), projs) + + def test_proj_4_3(self) -> None: + num_qubits = 4 + qubit_proj = 3 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_proj]) + assert len(projs) == 2 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), Z), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), I), projs) + + def test_proj_3_01(self) -> None: + num_qubits = 3 + qubit_pro1 = 0 + qubit_pro2 = 1 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_pro1, qubit_pro2]) + assert len(projs) == 4 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(Z, I), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + assert self.in_array(np.kron(np.kron(Z, Z), I), projs) + assert self.in_array(np.kron(np.kron(I, Z), I), projs) + + def test_proj_3_02(self) -> None: + num_qubits = 3 + qubit_pro1 = 0 + qubit_pro2 = 2 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_pro1, qubit_pro2]) + assert len(projs) == 4 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(Z, I), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + assert self.in_array(np.kron(np.kron(Z, I), Z), projs) + assert self.in_array(np.kron(np.kron(I, I), Z), projs) + + def test_proj_3_12(self) -> None: + num_qubits = 3 + qubit_pro1 = 1 + qubit_pro2 = 2 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_pro1, qubit_pro2]) + assert len(projs) == 4 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(I, Z), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + assert self.in_array(np.kron(np.kron(I, Z), Z), projs) + assert self.in_array(np.kron(np.kron(I, I), Z), projs) + + def test_proj_3_012(self) -> None: + num_qubits = 3 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([0, 1, 2]) + assert len(projs) == 8 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(I, Z), I), projs) + assert self.in_array(np.kron(np.kron(I, I), I), projs) + assert self.in_array(np.kron(np.kron(I, Z), Z), projs) + assert self.in_array(np.kron(np.kron(I, I), Z), projs) + + assert self.in_array(np.kron(np.kron(Z, Z), I), projs) + assert self.in_array(np.kron(np.kron(Z, I), I), projs) + assert self.in_array(np.kron(np.kron(Z, Z), Z), projs) + assert self.in_array(np.kron(np.kron(Z, I), Z), projs) + + def test_proj_4_02(self) -> None: + num_qubits = 4 + qubit_pro1 = 0 + qubit_pro2 = 2 + paulis = PauliZMatrices(num_qubits) + projs = paulis.get_projection_matrices([qubit_pro1, qubit_pro2]) + assert len(projs) == 4 + + I = np.array([[1, 0], [0, 1]], dtype=np.complex128) + Z = np.array([[1, 0], [0, -1]], dtype=np.complex128) + + assert self.in_array(np.kron(np.kron(np.kron(Z, I), I), I), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), I), I), projs) + 
assert self.in_array(np.kron(np.kron(np.kron(Z, I), Z), I), projs) + assert self.in_array(np.kron(np.kron(np.kron(I, I), Z), I), projs) + + +class TestPauliZMatricesDotProduct: + @pytest.mark.parametrize('invalid_alpha', [[1.1] * i for i in range(2)]) + def test_invalid_value(self, invalid_alpha: RealVector) -> None: + with pytest.raises(ValueError): + PauliZMatrices(1).dot_product(invalid_alpha) + + @pytest.mark.parametrize( + 'alpha, prod', [ + ([1, 0], PauliZMatrices.I), + ([0, 1], PauliZMatrices.Z), + ([1, 1], PauliZMatrices.I + PauliZMatrices.Z), + ], + ) + def test_size_1(self, alpha: RealVector, prod: npt.NDArray[Any]) -> None: + assert np.allclose(PauliZMatrices(1).dot_product(alpha), prod) + + @pytest.mark.parametrize( + 'alpha, prod', [ + ( + [1, 0, 0, 0], + np.kron(PauliZMatrices.I, PauliZMatrices.I), + ), + ( + [0, 1, 0, 0], + np.kron(PauliZMatrices.I, PauliZMatrices.Z), + ), + ( + [0, 0, 1, 0], + np.kron(PauliZMatrices.Z, PauliZMatrices.I), + ), + ( + [0, 0, 0, 1], + np.kron(PauliZMatrices.Z, PauliZMatrices.Z), + ), + ( + [1, 0, 0, 1], + np.kron(PauliZMatrices.I, PauliZMatrices.I) + + np.kron(PauliZMatrices.Z, PauliZMatrices.Z), + ), + ( + [1.8, 0, 0, 91.7], + 1.8 * np.kron(PauliZMatrices.I, PauliZMatrices.I) + + 91.7 * np.kron(PauliZMatrices.Z, PauliZMatrices.Z), + ), + ], + ) + def test_size_2( + self, alpha: RealVector, + prod: npt.NDArray[np.complex128], + ) -> None: + assert np.allclose(PauliZMatrices(2).dot_product(alpha), prod) + + +class TestPauliZMatricesFromString: + def in_array(self, needle: Any, haystack: Any) -> bool: + for elem in haystack: + if not needle.shape == elem.shape: + continue + if np.allclose(elem, needle): + return True + + return False + + @valid_type_test(PauliZMatrices.from_string) + def test_valid_type(self) -> None: + pass + + @invalid_type_test(PauliZMatrices.from_string) + def test_invalid_type(self) -> None: + pass + + @pytest.mark.parametrize( + 'invalid_str', [ + 'ABC', + 'IXYZA', + '\t AIXYZ ,, \n\r\tabc\t', + 'IXYZ+', + 'IXYZ, IXA', + 'WXYZ, XYZ', + ], + ) + def test_invalid_value(self, invalid_str: str) -> None: + with pytest.raises(ValueError): + PauliZMatrices.from_string(invalid_str) + + @pytest.mark.parametrize( + 'pauli_str, pauli_mat', [ + ( + 'IZZ', + np.kron( + np.kron( + PauliZMatrices.I, + PauliZMatrices.Z, + ), + PauliZMatrices.Z, + ), + ), + ( + 'ZIZ', + np.kron( + np.kron( + PauliZMatrices.Z, + PauliZMatrices.I, + ), + PauliZMatrices.Z, + ), + ), + ( + 'ZZI', + np.kron( + np.kron( + PauliZMatrices.Z, + PauliZMatrices.Z, + ), + PauliZMatrices.I, + ), + ), + ('\t ZZ ,,\n\r\t\t', np.kron(PauliZMatrices.Z, PauliZMatrices.Z)), + ], + ) + def test_single( + self, + pauli_str: str, + pauli_mat: npt.NDArray[np.complex128], + ) -> None: + assert isinstance(PauliZMatrices.from_string(pauli_str), np.ndarray) + assert np.allclose( + np.array(PauliZMatrices.from_string(pauli_str)), + pauli_mat, + ) + + @pytest.mark.parametrize( + 'pauli_str, pauli_mats', [ + ( + 'IIZ, IIZ', [ + np.kron( + np.kron( + PauliZMatrices.I, + PauliZMatrices.I, + ), + PauliZMatrices.Z, + ), + np.kron( + np.kron( + PauliZMatrices.I, + PauliZMatrices.I, + ), + PauliZMatrices.Z, + ), + ], + ), + ( + 'ZIZ, ZZI', [ + np.kron( + np.kron( + PauliZMatrices.Z, + PauliZMatrices.I, + ), + PauliZMatrices.Z, + ), + np.kron( + np.kron( + PauliZMatrices.Z, + PauliZMatrices.Z, + ), + PauliZMatrices.I, + ), + ], + ), + ( + 'IIZ, IZI, ZZZ', [ + np.kron( + np.kron( + PauliZMatrices.I, + PauliZMatrices.I, + ), + PauliZMatrices.Z, + ), + np.kron( + np.kron( + PauliZMatrices.I, + 
PauliZMatrices.Z, + ), + PauliZMatrices.I, + ), + np.kron( + np.kron( + PauliZMatrices.Z, + PauliZMatrices.Z, + ), + PauliZMatrices.Z, + ), + ], + ), + ], + ) + def test_multi( + self, pauli_str: str, + pauli_mats: list[npt.NDArray[np.complex128]], + ) -> None: + paulis = PauliZMatrices.from_string(pauli_str) + assert isinstance(paulis, list) + assert all(isinstance(pauli, np.ndarray) for pauli in paulis) + assert len(paulis) == len(pauli_mats) + assert all(self.in_array(pauli, pauli_mats) for pauli in paulis) From 886f4f6576840c7c58efa8b83a289bf0b5a2c9c6 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 28 Jun 2024 13:54:16 -0700 Subject: [PATCH 056/197] Operations for PauliZ --- bqskit/utils/math.py | 31 +++++++++++++++++++++++++++++++ tests/utils/test_math.py | 17 +++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index a95d76518..9fa193319 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -8,6 +8,7 @@ import scipy as sp from bqskit.qis.pauli import PauliMatrices +from bqskit.qis.pauliz import PauliZMatrices from bqskit.qis.unitary.unitary import RealVector @@ -168,6 +169,36 @@ def pauli_expansion(H: npt.NDArray[np.complex128]) -> npt.NDArray[np.float64]: return np.array(X) +def pauliz_expansion(H: npt.NDArray[np.complex128]) -> npt.NDArray[np.float64]: + """ + Computes a Pauli Z expansion of the diagonal hermitian matrix H. + + Args: + H (np.ndarray): The diagonal hermitian matrix to expand. + + Returns: + np.ndarray: The coefficients of a Pauli Z expansion for H, + i.e., X dot Sigma = H where Sigma contains Pauli Z matrices of + same size of H. + + Note: + This assumes the input is diagonal. No check is done for hermicity. + The output is undefined on non-hermitian inputs. + """ + diag_H = np.diag(np.diag(H)) + if not np.allclose(H, diag_H): + msg = 'H must be a diagonal matrix.' + raise ValueError(msg) + # Change basis of H to Pauli Basis (solve for coefficients -> X) + n = int(np.log2(len(H))) + paulizs = PauliZMatrices(n) + flatten_paulizs = [np.diag(pauli) for pauli in paulizs] + flatten_H = np.diag(H) + A = np.stack(flatten_paulizs, axis=-1) + X = np.real(np.matmul(np.linalg.inv(A), flatten_H)) + return np.array(X) + + def compute_su_generators(n: int) -> npt.NDArray[np.complex128]: """ Computes the Lie algebra generators for SU(n). 
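
# A small standalone illustration (not part of the patch) of the idea behind
# pauliz_expansion above: the diagonals of the 2^n Pauli Z strings form a
# basis for diagonal matrices, so the expansion coefficients are the solution
# of a linear system over those diagonals. The helper name z_strings is an
# assumption for this sketch and is not BQSKit API.
import itertools as it
import numpy as np

I2 = np.array([[1, 0], [0, 1]], dtype=np.complex128)
Z = np.array([[1, 0], [0, -1]], dtype=np.complex128)

def z_strings(n: int) -> list:
    """All n-fold tensor products of {I, Z}, matching PauliZMatrices order."""
    mats = []
    for choice in it.product([I2, Z], repeat=n):
        acc = choice[0]
        for m in choice[1:]:
            acc = np.kron(acc, m)
        mats.append(acc)
    return mats

H = np.diag([0.3, -1.2, 0.0, 2.5]).astype(np.complex128)  # diagonal Hermitian
basis = z_strings(2)
A = np.stack([np.diag(p) for p in basis], axis=-1)  # columns: basis diagonals
x = np.real(np.linalg.solve(A, np.diag(H)))         # expansion coefficients
assert np.allclose(H, sum(c * p for c, p in zip(x, basis)))
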
diff --git a/tests/utils/test_math.py b/tests/utils/test_math.py index 0f21167dd..4b8e697d6 100644 --- a/tests/utils/test_math.py +++ b/tests/utils/test_math.py @@ -10,10 +10,12 @@ from scipy.stats import unitary_group from bqskit.qis.pauli import PauliMatrices +from bqskit.qis.pauliz import PauliZMatrices from bqskit.utils.math import canonical_unitary from bqskit.utils.math import dexpmv from bqskit.utils.math import dot_product from bqskit.utils.math import pauli_expansion +from bqskit.utils.math import pauliz_expansion from bqskit.utils.math import softmax from bqskit.utils.math import unitary_log_no_i @@ -188,6 +190,21 @@ def test_valid(self, reH: npt.NDArray[np.complex128]) -> None: assert np.linalg.norm(H - reH) < 1e-16 +class TestPauliZExpansion: + @pytest.mark.parametrize( + 'reH', + PauliZMatrices(1).paulizs + + PauliZMatrices(2).paulizs + + PauliZMatrices(3).paulizs + + PauliZMatrices(4).paulizs, + ) + def test_valid(self, reH: npt.NDArray[np.complex128]) -> None: + alpha = pauliz_expansion(reH) + print(alpha) + H = PauliZMatrices(int(np.log2(reH.shape[0]))).dot_product(alpha) + assert np.linalg.norm(H - reH) < 1e-16 + + class TestCanonicalUnitary: @pytest.mark.parametrize( 'phase, num_qudits', From fa816e50a7a208c8dd0eb7c630bd86e27098e644 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 28 Jun 2024 13:54:52 -0700 Subject: [PATCH 057/197] PauliZGate for diagonal unitaries --- bqskit/ir/gates/parameterized/pauliz.py | 103 ++++++++++++++++++++ tests/ir/gates/parameterized/test_pauliz.py | 61 ++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 bqskit/ir/gates/parameterized/pauliz.py create mode 100644 tests/ir/gates/parameterized/test_pauliz.py diff --git a/bqskit/ir/gates/parameterized/pauliz.py b/bqskit/ir/gates/parameterized/pauliz.py new file mode 100644 index 000000000..12d51e2f2 --- /dev/null +++ b/bqskit/ir/gates/parameterized/pauliz.py @@ -0,0 +1,103 @@ +"""This module implements the PauliZGate.""" +from __future__ import annotations + +from typing import Any + +import numpy as np +import numpy.typing as npt +import scipy as sp + +from bqskit.ir.gates.generalgate import GeneralGate +from bqskit.ir.gates.qubitgate import QubitGate +from bqskit.qis.pauliz import PauliZMatrices +from bqskit.qis.unitary.differentiable import DifferentiableUnitary +from bqskit.qis.unitary.unitary import RealVector +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.utils.docs import building_docs +from bqskit.utils.math import dexpmv +from bqskit.utils.math import dot_product +from bqskit.utils.math import pauliz_expansion +from bqskit.utils.math import unitary_log_no_i + + +class PauliZGate(QubitGate, DifferentiableUnitary, GeneralGate): + """ + A gate representing an arbitrary diagonal rotation. + + This gate is given by: + + .. math:: + + \\exp({i(\\vec{\\alpha} \\cdot \\vec{\\sigma_Z^{\\otimes n}})}) + + Where :math:`\\vec{\\alpha}` are the gate's parameters, + :math:`\\vec{\\sigma}` are the PauliZ Z matrices, + and :math:`n` is the number of qubits this gate acts on. + """ + + def __init__(self, num_qudits: int) -> None: + """ + Create a PauliZGate acting on `num_qudits` qubits. + + Args: + num_qudits (int): The number of qudits this gate will act on. + + Raises: + ValueError: If `num_qudits` is nonpositive. 
+ """ + + if num_qudits <= 0: + raise ValueError(f'Expected positive integer, got {num_qudits}') + + self._name = f'PauliZGate({num_qudits})' + self._num_qudits = num_qudits + self.paulis = PauliZMatrices(self.num_qudits) + self._num_params = len(self.paulis) + if building_docs(): + self.sigmav: npt.NDArray[Any] = np.array([]) + else: + self.sigmav = (-1j / 2) * self.paulis.numpy + + def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: + """Return the unitary for this gate, see :class:`Unitary` for more.""" + self.check_parameters(params) + H = dot_product(params, self.sigmav) + eiH = sp.linalg.expm(H) + return UnitaryMatrix(eiH, check_arguments=False) + + def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: + """ + Return the gradient for this gate. + + See :class:`DifferentiableUnitary` for more info. + """ + self.check_parameters(params) + + H = dot_product(params, self.sigmav) + _, dU = dexpmv(H, self.sigmav) + return dU + + def get_unitary_and_grad( + self, + params: RealVector = [], + ) -> tuple[UnitaryMatrix, npt.NDArray[np.complex128]]: + """ + Return the unitary and gradient for this gate. + + See :class:`DifferentiableUnitary` for more info. + """ + self.check_parameters(params) + + H = dot_product(params, self.sigmav) + U, dU = dexpmv(H, self.sigmav) + return UnitaryMatrix(U, check_arguments=False), dU + + def calc_params(self, utry: UnitaryMatrix) -> list[float]: + """Return the parameters for this gate to implement `utry`""" + return list(-2 * pauliz_expansion(unitary_log_no_i(utry.numpy))) + + def __eq__(self, o: object) -> bool: + return isinstance(o, PauliZGate) and self.num_qudits == o.num_qudits + + def __hash__(self) -> int: + return hash((self.__class__.__name__, self.num_qudits)) diff --git a/tests/ir/gates/parameterized/test_pauliz.py b/tests/ir/gates/parameterized/test_pauliz.py new file mode 100644 index 000000000..61ee78811 --- /dev/null +++ b/tests/ir/gates/parameterized/test_pauliz.py @@ -0,0 +1,61 @@ +"""This module tests the PauliZGate class.""" +from __future__ import annotations + +import numpy as np +import pytest +from hypothesis import given +from hypothesis.strategies import floats +from hypothesis.strategies import integers + +from bqskit.ir.gates import IdentityGate +from bqskit.ir.gates import PauliZGate +from bqskit.ir.gates import RZGate +from bqskit.ir.gates import RZZGate +from bqskit.utils.test.strategies import num_qudits + + +class TestInit: + @given(num_qudits(4)) + def test_valid(self, num_qudits: int) -> None: + g = PauliZGate(num_qudits) + assert g.num_qudits == num_qudits + assert g.num_params == 2 ** num_qudits + identity = np.identity(2 ** num_qudits) + assert g.get_unitary([0] * 2 ** num_qudits) == identity + + @given(integers(max_value=0)) + def test_invalid(self, num_qudits: int) -> None: + with pytest.raises(ValueError): + PauliZGate(num_qudits) + + +class TestGetUnitary: + @given(floats(allow_nan=False, allow_infinity=False, width=16)) + def test_i(self, angle: float) -> None: + g = PauliZGate(1) + i = IdentityGate(1).get_unitary() + dist = g.get_unitary([angle, 0]).get_distance_from(i) + assert dist < 1e-7 + + @given(floats(allow_nan=False, allow_infinity=False, width=16)) + def test_z(self, angle: float) -> None: + g = PauliZGate(1) + z = RZGate() + assert g.get_unitary([0, angle]) == z.get_unitary([angle]) + + @given(floats(allow_nan=False, allow_infinity=False, width=16)) + def test_zz(self, angle: float) -> None: + g = PauliZGate(2) + zz = RZZGate() + params = [0.0] * 4 + params[3] = angle + 
assert g.get_unitary(params) == zz.get_unitary([angle]) + + +@given(floats(allow_nan=False, allow_infinity=False, width=16)) +def test_optimize(angle: float) -> None: + g = PauliZGate(1) + z = RZGate() + utry = z.get_unitary([angle]) + params = g.optimize(np.array(utry)) + assert g.get_unitary(params).get_distance_from(utry.conj().T) < 1e-7 From 9c980277d25baa1245fc65d20506027f44487ee3 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 28 Jun 2024 13:55:16 -0700 Subject: [PATCH 058/197] Added PauliZ to __init__ --- bqskit/ir/gates/__init__.py | 1 + bqskit/ir/gates/parameterized/__init__.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/bqskit/ir/gates/__init__.py b/bqskit/ir/gates/__init__.py index 1c9a83cbb..4187decb5 100644 --- a/bqskit/ir/gates/__init__.py +++ b/bqskit/ir/gates/__init__.py @@ -73,6 +73,7 @@ CUGate FSIMGate PauliGate + PauliZGate PhasedXZGate RSU3Gate RXGate diff --git a/bqskit/ir/gates/parameterized/__init__.py b/bqskit/ir/gates/parameterized/__init__.py index 3ee2c645c..546520b60 100644 --- a/bqskit/ir/gates/parameterized/__init__.py +++ b/bqskit/ir/gates/parameterized/__init__.py @@ -12,6 +12,7 @@ from bqskit.ir.gates.parameterized.cu import CUGate from bqskit.ir.gates.parameterized.fsim import FSIMGate from bqskit.ir.gates.parameterized.pauli import PauliGate +from bqskit.ir.gates.parameterized.pauliz import PauliZGate from bqskit.ir.gates.parameterized.phasedxz import PhasedXZGate from bqskit.ir.gates.parameterized.rsu3 import RSU3Gate from bqskit.ir.gates.parameterized.rx import RXGate @@ -41,6 +42,7 @@ 'CUGate', 'FSIMGate', 'PauliGate', + 'PauliZGate', 'PhasedXZGate', 'RSU3Gate', 'RXGate', From f155efcd7a26646755200089eeedeaa5a3c0361e Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 1 Jul 2024 12:29:27 -0700 Subject: [PATCH 059/197] Added diagonal distance --- bqskit/utils/math.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index 9fa193319..9aa7df1ed 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -288,3 +288,24 @@ def canonical_unitary( correction_phase = 0 - std_phase std_correction = np.exp(1j * correction_phase) return std_correction * special_unitary + + +def diagonal_distance(unitary: npt.NDArray[np.complex128]) -> float: + """ + Compute how diagonal a unitary is. + + The diagonal distance measures how closely a unitary can be approx- + imately inverted by a diagonal unitary. A unitary is approximately + inverted when the Hilbert-Schmidt distance to the identity is less + than some threshold. + + Args: + unitary (np.ndarray): The unitary matrix to check. + + Returns: + float: The Hilbert-Schmidt distance to the nearest diagonal. + """ + eps = unitary - np.diag(np.diag(unitary.numpy)) + eps2 = eps * eps.conj() + distance = abs(np.sqrt(eps2.sum(-1).max())) + return distance \ No newline at end of file From 00843e35494872d38deec8e800a6670bcab141a1 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 1 Jul 2024 12:54:57 -0700 Subject: [PATCH 060/197] Tests for diagonal distance --- bqskit/utils/math.py | 2 +- tests/utils/test_math.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index 9aa7df1ed..db675403e 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -305,7 +305,7 @@ def diagonal_distance(unitary: npt.NDArray[np.complex128]) -> float: Returns: float: The Hilbert-Schmidt distance to the nearest diagonal. 
""" - eps = unitary - np.diag(np.diag(unitary.numpy)) + eps = unitary - np.diag(np.diag(unitary)) eps2 = eps * eps.conj() distance = abs(np.sqrt(eps2.sum(-1).max())) return distance \ No newline at end of file diff --git a/tests/utils/test_math.py b/tests/utils/test_math.py index 4b8e697d6..f1a8d23f0 100644 --- a/tests/utils/test_math.py +++ b/tests/utils/test_math.py @@ -13,6 +13,7 @@ from bqskit.qis.pauliz import PauliZMatrices from bqskit.utils.math import canonical_unitary from bqskit.utils.math import dexpmv +from bqskit.utils.math import diagonal_distance from bqskit.utils.math import dot_product from bqskit.utils.math import pauli_expansion from bqskit.utils.math import pauliz_expansion @@ -223,3 +224,32 @@ def test_canonical_unitary( phased_unitary = phase * base_unitary recanon_unitary = canonical_unitary(phased_unitary) assert np.allclose(canon_unitary, recanon_unitary, atol=1e-5) + + +class TestDiagonalDistance: + @pytest.mark.parametrize( + 'num_qudits, epsilon, threshold_list', + [ + (n, 10 ** -e, [10 ** -t for t in range(1, 10)]) + for n in range(1, 4) + for e in range(1, 10) + ], + ) + def test_diagonal_distance( + self, + num_qudits: int, + epsilon: float, + threshold_list: list[float], + ) -> None: + N = 2 ** num_qudits + off_diag = epsilon / (N - 1) + on_diag = 1 - epsilon + matrix = -off_diag * np.ones((N, N), dtype=np.complex128) + np.fill_diagonal(matrix, on_diag) + + for threshold in threshold_list: + distance = diagonal_distance(matrix) + if epsilon <= threshold: + assert distance <= threshold + else: + assert distance > threshold \ No newline at end of file From ff22669d7a552593a3c8148fe03e53ebcfc6b701 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 1 Jul 2024 13:21:56 -0700 Subject: [PATCH 061/197] Diagonal predicate --- bqskit/passes/control/predicates/diagonal.py | 39 +++++++++++++ .../control/predicates/test_diagonal.py | 56 +++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 bqskit/passes/control/predicates/diagonal.py create mode 100644 tests/passes/control/predicates/test_diagonal.py diff --git a/bqskit/passes/control/predicates/diagonal.py b/bqskit/passes/control/predicates/diagonal.py new file mode 100644 index 000000000..b25f05259 --- /dev/null +++ b/bqskit/passes/control/predicates/diagonal.py @@ -0,0 +1,39 @@ +"""This module implements the DiagonalPredicate class.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from bqskit.passes.control.predicate import PassPredicate +from bqskit.utils.math import diagonal_distance + +if TYPE_CHECKING: + from bqskit.compiler.passdata import PassData + from bqskit.ir.circuit import Circuit + + +class DiagonalPredicate(PassPredicate): + """ + The DiagonalPredicate class. + + The DiagonalPredicate class returns True if the circuit's unitary can + be approximately inverted by a diagonal unitary. A unitary is approx- + imately inverted when the Hilbert-Schmidt distance to the identity is + less than some threshold. + """ + + def __init__(self, threshold: float) -> None: + """ + Construct a DiagonalPredicate. + + Args: + threshold (float): If a circuit can be approximately inverted + by a diagonal unitary (meaning the Hilbert-Schmidt distance + to the identity is less than or equal to this number after + multiplying by the diagonal unitary), True is returned. 
+ """ + self.threshold = threshold + + def get_truth_value(self, circuit: Circuit, data: PassData) -> bool: + """Call this predicate, see :class:`PassPredicate` for more info.""" + dist = diagonal_distance(circuit.get_unitary()) + return dist <= self.threshold diff --git a/tests/passes/control/predicates/test_diagonal.py b/tests/passes/control/predicates/test_diagonal.py new file mode 100644 index 000000000..9d010b6fd --- /dev/null +++ b/tests/passes/control/predicates/test_diagonal.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from hypothesis import given +from hypothesis.strategies import integers + +import numpy as np + +from itertools import combinations + +from random import choices + +from bqskit.compiler.passdata import PassData +from bqskit.ir.circuit import Circuit +from bqskit.ir.gates import CNOTGate +from bqskit.ir.gates import HGate +from bqskit.ir.gates import RZGate +from bqskit.ir.gates import SXGate +from bqskit.passes.control.predicates.diagonal import DiagonalPredicate + + +def phase_gadget() -> Circuit: + gadget = Circuit(2) + gadget.append_gate(CNOTGate(), (0, 1)) + gadget.append_gate(RZGate(), (1), [np.random.normal()]) + gadget.append_gate(CNOTGate(), (0, 1)) + return gadget + +@given(integers(2, 6), integers(0, 10)) +def test_diagonal_predicate(num_qudits: int, num_gadgets: int) -> None: + circuit = Circuit(num_qudits) + all_locations = list(combinations(range(num_qudits), r=2)) + locations = choices(all_locations, k=num_gadgets) + for location in locations: + circuit.append_circuit(phase_gadget(), location) + data = PassData(circuit) + pred = DiagonalPredicate(1e-5) + assert pred.get_truth_value(circuit, data) == True + + circuit.append_gate(HGate(), (0)) + data = PassData(circuit) + assert pred.get_truth_value(circuit, data) == False + +@given(integers(1, 10)) +def test_single_qubit_diagonal_predicate(exponent: int) -> None: + angle = 10 ** - exponent + circuit = Circuit(1) + circuit.append_gate(RZGate(), (0), [angle]) + circuit.append_gate(SXGate(), (0)) + circuit.append_gate(RZGate(), (0), [np.random.normal()]) + circuit.append_gate(SXGate(), (0)) + circuit.append_gate(RZGate(), (0), [angle]) + + pred = DiagonalPredicate(1e-5) + data = PassData(circuit) + # This is true by the small angle approximation + pred.get_truth_value(circuit, data) == (angle < 1e-5) \ No newline at end of file From ff23b385d9529781f8bf768bd0fcb27c24fe1637 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 1 Jul 2024 13:26:26 -0700 Subject: [PATCH 062/197] pre-commit fixes --- bqskit/passes/control/predicates/diagonal.py | 10 ++++----- bqskit/qis/pauliz.py | 4 ++-- bqskit/utils/math.py | 4 ++-- .../control/predicates/test_diagonal.py | 21 ++++++++++--------- tests/utils/test_math.py | 2 +- 5 files changed, 21 insertions(+), 20 deletions(-) diff --git a/bqskit/passes/control/predicates/diagonal.py b/bqskit/passes/control/predicates/diagonal.py index b25f05259..0b584fd57 100644 --- a/bqskit/passes/control/predicates/diagonal.py +++ b/bqskit/passes/control/predicates/diagonal.py @@ -15,10 +15,10 @@ class DiagonalPredicate(PassPredicate): """ The DiagonalPredicate class. - The DiagonalPredicate class returns True if the circuit's unitary can - be approximately inverted by a diagonal unitary. A unitary is approx- - imately inverted when the Hilbert-Schmidt distance to the identity is - less than some threshold. + The DiagonalPredicate class returns True if the circuit's unitary can be + approximately inverted by a diagonal unitary. 
A unitary is approx- imately + inverted when the Hilbert-Schmidt distance to the identity is less than some + threshold. """ def __init__(self, threshold: float) -> None: @@ -35,5 +35,5 @@ def __init__(self, threshold: float) -> None: def get_truth_value(self, circuit: Circuit, data: PassData) -> bool: """Call this predicate, see :class:`PassPredicate` for more info.""" - dist = diagonal_distance(circuit.get_unitary()) + dist = diagonal_distance(circuit.get_unitary().numpy) return dist <= self.threshold diff --git a/bqskit/qis/pauliz.py b/bqskit/qis/pauliz.py index 0519221d9..32d0a01f6 100644 --- a/bqskit/qis/pauliz.py +++ b/bqskit/qis/pauliz.py @@ -23,8 +23,8 @@ class PauliZMatrices(Sequence[npt.NDArray[np.complex128]]): """ The group of Pauli Z matrices. - A PauliZMatrices object represents the entire of set of Pauli Z matrices - for some number of qubits. + A PauliZMatrices object represents the entire of set of Pauli Z matrices for + some number of qubits. """ Z = np.array( diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index db675403e..839be0441 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -301,11 +301,11 @@ def diagonal_distance(unitary: npt.NDArray[np.complex128]) -> float: Args: unitary (np.ndarray): The unitary matrix to check. - + Returns: float: The Hilbert-Schmidt distance to the nearest diagonal. """ eps = unitary - np.diag(np.diag(unitary)) eps2 = eps * eps.conj() distance = abs(np.sqrt(eps2.sum(-1).max())) - return distance \ No newline at end of file + return distance diff --git a/tests/passes/control/predicates/test_diagonal.py b/tests/passes/control/predicates/test_diagonal.py index 9d010b6fd..f20f32009 100644 --- a/tests/passes/control/predicates/test_diagonal.py +++ b/tests/passes/control/predicates/test_diagonal.py @@ -1,14 +1,12 @@ from __future__ import annotations -from hypothesis import given -from hypothesis.strategies import integers - -import numpy as np - from itertools import combinations - from random import choices +import numpy as np +from hypothesis import given +from hypothesis.strategies import integers + from bqskit.compiler.passdata import PassData from bqskit.ir.circuit import Circuit from bqskit.ir.gates import CNOTGate @@ -25,6 +23,7 @@ def phase_gadget() -> Circuit: gadget.append_gate(CNOTGate(), (0, 1)) return gadget + @given(integers(2, 6), integers(0, 10)) def test_diagonal_predicate(num_qudits: int, num_gadgets: int) -> None: circuit = Circuit(num_qudits) @@ -34,11 +33,13 @@ def test_diagonal_predicate(num_qudits: int, num_gadgets: int) -> None: circuit.append_circuit(phase_gadget(), location) data = PassData(circuit) pred = DiagonalPredicate(1e-5) - assert pred.get_truth_value(circuit, data) == True + + is_diagonal = True + assert pred.get_truth_value(circuit, data) == is_diagonal circuit.append_gate(HGate(), (0)) - data = PassData(circuit) - assert pred.get_truth_value(circuit, data) == False + assert not pred.get_truth_value(circuit, data) == is_diagonal + @given(integers(1, 10)) def test_single_qubit_diagonal_predicate(exponent: int) -> None: @@ -53,4 +54,4 @@ def test_single_qubit_diagonal_predicate(exponent: int) -> None: pred = DiagonalPredicate(1e-5) data = PassData(circuit) # This is true by the small angle approximation - pred.get_truth_value(circuit, data) == (angle < 1e-5) \ No newline at end of file + pred.get_truth_value(circuit, data) == (angle < 1e-5) diff --git a/tests/utils/test_math.py b/tests/utils/test_math.py index f1a8d23f0..2e9036b51 100644 --- a/tests/utils/test_math.py +++ 
b/tests/utils/test_math.py @@ -252,4 +252,4 @@ def test_diagonal_distance( if epsilon <= threshold: assert distance <= threshold else: - assert distance > threshold \ No newline at end of file + assert distance > threshold From 873868c9a63cc6e8e90987a4c9c055279f25c06b Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 1 Jul 2024 13:28:50 -0700 Subject: [PATCH 063/197] Docstring fix --- bqskit/passes/control/predicates/diagonal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/control/predicates/diagonal.py b/bqskit/passes/control/predicates/diagonal.py index 0b584fd57..0d72ca7f4 100644 --- a/bqskit/passes/control/predicates/diagonal.py +++ b/bqskit/passes/control/predicates/diagonal.py @@ -16,7 +16,7 @@ class DiagonalPredicate(PassPredicate): The DiagonalPredicate class. The DiagonalPredicate class returns True if the circuit's unitary can be - approximately inverted by a diagonal unitary. A unitary is approx- imately + approximately inverted by a diagonal unitary. A unitary is approximately inverted when the Hilbert-Schmidt distance to the identity is less than some threshold. """ From 88a84d4f47153e0e13c6a812986f0662b20daddd Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 2 Jul 2024 10:32:15 -0700 Subject: [PATCH 064/197] DiagonalSynthesisPass --- bqskit/passes/synthesis/diagonal.py | 112 ++++++++++++++++++++++++ tests/passes/synthesis/test_diagonal.py | 85 ++++++++++++++++++ 2 files changed, 197 insertions(+) create mode 100644 bqskit/passes/synthesis/diagonal.py create mode 100644 tests/passes/synthesis/test_diagonal.py diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py new file mode 100644 index 000000000..c7d7190c0 --- /dev/null +++ b/bqskit/passes/synthesis/diagonal.py @@ -0,0 +1,112 @@ +"""This module implements the DiagonalSynthesisPass.""" +from __future__ import annotations + +import logging + +from typing import Any + +from numpy import where + +from bqskit.compiler.passdata import PassData +from bqskit.ir.circuit import Circuit +from bqskit.ir.gates import CNOTGate +from bqskit.ir.gates import RZGate +from bqskit.passes.synthesis.synthesis import SynthesisPass +from bqskit.qis.state.state import StateVector +from bqskit.qis.state.system import StateSystem +from bqskit.qis.unitary import UnitaryMatrix +from bqskit.utils.math import unitary_log_no_i +from bqskit.utils.math import pauliz_expansion + + +_logger = logging.getLogger(__name__) + + +class DiagonalSynthesisPass(SynthesisPass): + """ + A pass that synthesizes diagonal unitaries. + """ + + def __init__( + self, + parameter_precision: float = 1e-8, + ) -> None: + """ + Constructor for DiagonalSynthesisPass. + + Args: + parameter_precision (float): Pauli strings with parameter values + less than this are rounded to zero. 
(Default: 1e-8) + + TODO: + - Optimize Pauli string ordering + - Cancel adjacent CNOTs + """ + self.parameter_precision = parameter_precision + + def gray_code(self, number: int) -> int: + """Convert a number to its Gray code representation.""" + gray = number ^ (number >> 1) + return gray + + def pauli_to_subcircuit( + self, + string_id: int, + angle: float, + num_qubits: int, + ) -> Circuit: + string = bin(string_id)[2:].zfill(num_qubits) + circuit = Circuit(num_qubits) + locations = [i for i in range(num_qubits) if string[i] == '1'] + if len(locations) == 1: + circuit.append_gate(RZGate(), locations[0], [angle]) + elif len(locations) > 1: + pairs = [ + (locations[i], locations[i+1]) + for i in range(len(locations) - 1) + ] + for pair in pairs: + circuit.append_gate(CNOTGate(), pair) + circuit.append_gate(RZGate(), locations[-1], [angle]) + for pair in reversed(pairs): + circuit.append_gate(CNOTGate(), pair) + return circuit + + async def synthesize( + self, + utry: UnitaryMatrix | StateVector | StateSystem, + data: PassData, + ) -> Circuit: + """Synthesize `utry`, see :class:`SynthesisPass` for more.""" + if not isinstance(utry, UnitaryMatrix): + m = 'DiagonalSynthesisPass can only synthesize diagonal, ' + m += f'UnitaryMatrixs, got {type(utry)}.' + raise TypeError(m) + + if not utry.is_qubit_only(): + m = 'DiagonalSynthesisPass can only synthesize diagonal ' + m += 'UnitaryMatrixs with qubits, got higher radix than 2.' + raise ValueError(m) + + num_qubits = utry.num_qudits + circuit = Circuit(num_qubits) + + # Find parameters of each I/Z Pauli string + H_matrix = unitary_log_no_i(utry) + params = pauliz_expansion(H_matrix) * 2 + # Remove low weight terms - these are likely numerical errors + params = where(abs(params) < self.parameter_precision, 0, params) + + # Order the Pauli strings by their Gray code representation + pauli_params = sorted( + [(i, -p) for i, p in enumerate(params)], + key=lambda x: self.gray_code(x[0]) + ) + subcircuits = [ + self.pauli_to_subcircuit(i, p, num_qubits) for i, p in pauli_params + ] + + for subcircuit in subcircuits: + circuit.append_circuit(subcircuit, [_ for _ in range(num_qubits)]) + + return circuit diff --git a/tests/passes/synthesis/test_diagonal.py b/tests/passes/synthesis/test_diagonal.py new file mode 100644 index 000000000..248271306 --- /dev/null +++ b/tests/passes/synthesis/test_diagonal.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from numpy.random import normal + +from scipy.linalg import expm + +from bqskit.compiler import Compiler +from bqskit.ir.circuit import Circuit +from bqskit.qis import UnitaryMatrix + +from bqskit.qis.pauliz import PauliZMatrices +from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass + + +class TestDiagonalSynthesis: + + def test_1_qubit(self, compiler: Compiler) -> None: + num_qubits = 1 + pauliz = PauliZMatrices(num_qubits) + vector = [normal() for _ in range(len(pauliz))] + H_matrix = pauliz.dot_product(vector) + utry = UnitaryMatrix(expm(1j * H_matrix)) + + circuit = Circuit.from_unitary(utry) + synthesis = DiagonalSynthesisPass() + circuit = compiler.compile(circuit, [synthesis]) + dist = circuit.get_unitary().get_distance_from(utry) + + assert dist <= 1e-5 + + def test_2_qubit(self, compiler: Compiler) -> None: + num_qubits = 2 + pauliz = PauliZMatrices(num_qubits) + vector = [normal() for _ in range(len(pauliz))] + H_matrix = pauliz.dot_product(vector) + utry = UnitaryMatrix(expm(1j * H_matrix)) + + circuit = Circuit.from_unitary(utry) + synthesis = DiagonalSynthesisPass() + 
circuit = compiler.compile(circuit, [synthesis]) + dist = circuit.get_unitary().get_distance_from(utry) + + assert dist <= 1e-5 + + def test_3_qubit(self, compiler: Compiler) -> None: + num_qubits = 3 + pauliz = PauliZMatrices(num_qubits) + vector = [normal() for _ in range(len(pauliz))] + H_matrix = pauliz.dot_product(vector) + utry = UnitaryMatrix(expm(1j * H_matrix)) + + circuit = Circuit.from_unitary(utry) + synthesis = DiagonalSynthesisPass() + circuit = compiler.compile(circuit, [synthesis]) + dist = circuit.get_unitary().get_distance_from(utry) + + assert dist <= 1e-5 + + def test_4_qubit(self, compiler: Compiler) -> None: + num_qubits = 4 + pauliz = PauliZMatrices(num_qubits) + vector = [normal() for _ in range(len(pauliz))] + H_matrix = pauliz.dot_product(vector) + utry = UnitaryMatrix(expm(1j * H_matrix)) + + circuit = Circuit.from_unitary(utry) + synthesis = DiagonalSynthesisPass() + circuit = compiler.compile(circuit, [synthesis]) + dist = circuit.get_unitary().get_distance_from(utry) + + assert dist <= 1e-5 + + def test_5_qubit(self, compiler: Compiler) -> None: + num_qubits = 5 + pauliz = PauliZMatrices(num_qubits) + vector = [normal() for _ in range(len(pauliz))] + H_matrix = pauliz.dot_product(vector) + utry = UnitaryMatrix(expm(1j * H_matrix)) + + circuit = Circuit.from_unitary(utry) + synthesis = DiagonalSynthesisPass() + circuit = compiler.compile(circuit, [synthesis]) + dist = circuit.get_unitary().get_distance_from(utry) + + assert dist <= 1e-5 \ No newline at end of file From 5b8392ce6a42c4f6f51a7aeb10729f0aa7ffe774 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 2 Jul 2024 10:45:44 -0700 Subject: [PATCH 065/197] Documentation for DiagonalSynthesisPass --- bqskit/passes/__init__.py | 2 ++ bqskit/passes/synthesis/__init__.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index 8362c9161..b21a45a30 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -28,6 +28,7 @@ :toctree: autogen :recursive: + DiagonalSynthesisPass LEAPSynthesisPass QSearchSynthesisPass QFASTDecompositionPass @@ -319,6 +320,7 @@ 'ScanPartitioner', 'QuickPartitioner', 'SynthesisPass', + 'DiagonalSynthesisPass', 'LEAPSynthesisPass', 'QSearchSynthesisPass', 'QFASTDecompositionPass', diff --git a/bqskit/passes/synthesis/__init__.py b/bqskit/passes/synthesis/__init__.py index 0d4e49c6b..6b7abe330 100644 --- a/bqskit/passes/synthesis/__init__.py +++ b/bqskit/passes/synthesis/__init__.py @@ -1,6 +1,7 @@ """This package implements synthesis passes and synthesis related classes.""" from __future__ import annotations +from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -10,6 +11,7 @@ from bqskit.passes.synthesis.target import SetTargetPass __all__ = [ + 'DiagonalSynthesisPass', 'LEAPSynthesisPass', 'QFASTDecompositionPass', 'QPredictDecompositionPass', From 6d71816d8594bc85aac16e5fde77e5849070b18e Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 2 Jul 2024 10:46:08 -0700 Subject: [PATCH 066/197] Pre-commit --- bqskit/passes/synthesis/diagonal.py | 22 +++++++++------------- tests/passes/synthesis/test_diagonal.py | 6 ++---- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index c7d7190c0..73a03a6c2 100644 --- 
a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -3,8 +3,6 @@ import logging -from typing import Any - from numpy import where from bqskit.compiler.passdata import PassData @@ -15,17 +13,15 @@ from bqskit.qis.state.state import StateVector from bqskit.qis.state.system import StateSystem from bqskit.qis.unitary import UnitaryMatrix -from bqskit.utils.math import unitary_log_no_i from bqskit.utils.math import pauliz_expansion +from bqskit.utils.math import unitary_log_no_i _logger = logging.getLogger(__name__) class DiagonalSynthesisPass(SynthesisPass): - """ - A pass that synthesizes diagonal unitaries. - """ + """A pass that synthesizes diagonal unitaries.""" def __init__( self, @@ -37,13 +33,13 @@ def __init__( Args: parameter_precision (float): Pauli strings with parameter values less than this are rounded to zero. (Default: 1e-8) - + TODO: - Optimize Pauli string ordering - Cancel adjacent CNOTs """ self.parameter_precision = parameter_precision - + def gray_code(self, number: int) -> int: """Convert a number to its Gray code representation.""" gray = number ^ (number >> 1) @@ -62,7 +58,7 @@ def pauli_to_subcircuit( circuit.append_gate(RZGate(), locations[0], [angle]) elif len(locations) > 1: pairs = [ - (locations[i], locations[i+1]) + (locations[i], locations[i + 1]) for i in range(len(locations) - 1) ] for pair in pairs: @@ -82,17 +78,17 @@ async def synthesize( m = 'DiagonalSynthesisPass can only synthesize diagonal, ' m += f'UnitaryMatrixs, got {type(utry)}.' raise TypeError(m) - + if not utry.is_qubit_only(): m = 'DiagonalSynthesisPass can only synthesize diagonal ' m += 'UnitaryMatrixs with qubits, got higher radix than 2.' raise ValueError(m) - + num_qubits = utry.num_qudits circuit = Circuit(num_qubits) # Find parameters of each I/Z Pauli string - H_matrix = unitary_log_no_i(utry) + H_matrix = unitary_log_no_i(utry.numpy) params = pauliz_expansion(H_matrix) * 2 # Remove low weight terms - these are likely numerical errors params = where(abs(params) < self.parameter_precision, 0, params) @@ -100,7 +96,7 @@ async def synthesize( # Order the Pauli strings by their Gray code representation pauli_params = sorted( [(i, -p) for i, p in enumerate(params)], - key=lambda x: self.gray_code(x[0]) + key=lambda x: self.gray_code(x[0]), ) subcircuits = [ self.pauli_to_subcircuit(i, p, num_qubits) for i, p in pauli_params diff --git a/tests/passes/synthesis/test_diagonal.py b/tests/passes/synthesis/test_diagonal.py index 248271306..5e2c557a4 100644 --- a/tests/passes/synthesis/test_diagonal.py +++ b/tests/passes/synthesis/test_diagonal.py @@ -1,15 +1,13 @@ from __future__ import annotations from numpy.random import normal - from scipy.linalg import expm from bqskit.compiler import Compiler from bqskit.ir.circuit import Circuit +from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass from bqskit.qis import UnitaryMatrix - from bqskit.qis.pauliz import PauliZMatrices -from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass class TestDiagonalSynthesis: @@ -82,4 +80,4 @@ def test_5_qubit(self, compiler: Compiler) -> None: circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) - assert dist <= 1e-5 \ No newline at end of file + assert dist <= 1e-5 From fe0492ed593d207af7542d4cca963f4c12d6faae Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Jul 2024 13:27:35 -0400 Subject: [PATCH 067/197] Init Surround Filter --- bqskit/ir/circuit.py | 21 ++++++++++++++++++++- 1 file changed, 20 
insertions(+), 1 deletion(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index d58b299a7..e1685fe20 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2096,9 +2096,10 @@ def surround( num_qudits: int, bounding_region: CircuitRegionLike | None = None, fail_quickly: bool = False, + filter: Callable[[Circuit.surround.Node], bool] | None = None, ) -> CircuitRegion: """ - Retrieve the maximal region in this circuit with `point` included. + Retrieve the maximal connected region in this circuit with `point`. Args: point (CircuitPointLike): Find a surrounding region for this @@ -2114,6 +2115,12 @@ def surround( result in some cases at the cost of only approximating the maximal region. + filter (Callable[[], bool] | None): The filter function determines + if a candidate region is valid. This is used to prune the + search space of the surround function. If None then no + filtering is done. It takes a surround search node and + returns a boolean. See the code for more type information. + Raises: IndexError: If `point` is not a valid index. @@ -2124,6 +2131,8 @@ def surround( ValueError: If `bounding_region` is invalid. + ValueError: If the initial node does not pass the filter. + Notes: This algorithm explores outward horizontally as much as possible. When a gate is encountered that involves another qudit not @@ -2143,6 +2152,9 @@ def surround( f'Expected a positive integer num_qudits, got {num_qudits}.', ) + if filter is not None and not callable(filter): + raise TypeError(f'Expected callable filter, got {type(filter)}.') + if bounding_region is not None: bounding_region = CircuitRegion(bounding_region) @@ -2194,6 +2206,9 @@ def surround( {CircuitPoint(point[0], q) for q in init_op.location}, ) + if filter is not None and not filter(init_node): + raise ValueError('Initial node does not pass filter.') + frontier: list[Node] = [init_node] # Track best so far @@ -2210,6 +2225,10 @@ def score(node: Node) -> int: _logger.debug(node[0]) _logger.debug(f'Items remaining in the frontier: {len(frontier)}') + if filter is not None and not filter(node): + _logger.debug('Node failed filter; skipping.') + continue + # Evaluate node if score(node) > best_score: # Calculate region from best node and return From 44040ff33b356650d388a4fad3e895c8ba813cca Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 3 Jul 2024 13:56:51 -0400 Subject: [PATCH 068/197] Documentation --- bqskit/ir/circuit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index e1685fe20..c8429cf87 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2170,7 +2170,7 @@ def surround( A HalfWire is a point in the circuit and a direction. This represents a point to start exploring from and a direction to - explore in. + explore in. The direction is either 'left' or 'right'. 
""" Node = Tuple[ From ac135ae77d645b51b22a095e8992caaf40a8bbb2 Mon Sep 17 00:00:00 2001 From: baobach Date: Fri, 5 Jul 2024 13:24:41 -0400 Subject: [PATCH 069/197] Modifying surround function --- bqskit/ir/circuit.py | 43 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index c8429cf87..876183f18 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2216,10 +2216,12 @@ def score(node: Node) -> int: return sum(op[1].num_qudits for op in node[1]) best_score = score(init_node) + _logger.debug(f'best_score: {best_score}') best_region = self.get_region({(point[0], init_op.location[0])}) # Exhaustive Search while len(frontier) > 0: + #_logger.debug(f'Current frontiers:{frontier}') node = frontier.pop(0) _logger.debug('popped node:') _logger.debug(node[0]) @@ -2227,10 +2229,12 @@ def score(node: Node) -> int: if filter is not None and not filter(node): _logger.debug('Node failed filter; skipping.') + _logger.debug(f'Node failed location: {node[2]}') continue # Evaluate node - if score(node) > best_score: + _logger.debug(f'current_score: {score(node)}') + if score(node) > best_score: # or (score(node) == best_score and len(node[2]) > len(best_region)): # Calculate region from best node and return points = {(cycle, op.location[0]) for cycle, op in node[1]} @@ -2243,13 +2247,36 @@ def score(node: Node) -> int: except ValueError: if fail_quickly: continue + # Bao's comment: The reason why I considering the node where the score is at least the same with + # the best score is due to the need to expand the region to at least the number of qubits that we required + # Why this works: as the filtering now limit the region to what we want, the node that we assuming should + # satisfy the filtering and bounded by the region that we want. Therefore, by using union to take into account + # all regions, we hope to cover the amount of qubits that we want + + # elif score(node) == best_score and len(best_region) < num_qudits: + # # Calculate region from best node and return + # points = {(cycle, op.location[0]) for cycle, op in node[1]} + # try: + # new_region = self.get_region(points) + # _logger.debug(f'new region: {new_region}') + # # If two region is different, merge them. 
As the region is bounded, we do not have the case where two + # # regions has no overlap + # if new_region.location != best_region.location: + # best_region = best_region.union(new_region) + # best_score = score(node) + # _logger.debug(f'new best: {best_region}.') + # + # # Need to reject bad regions + # except ValueError: + # if fail_quickly: + # continue # Expand node absorbed_gates: set[tuple[int, Operation]] = set() branches: set[tuple[int, int, Operation]] = set() before_branch_half_wires: dict[int, HalfWire] = {} for i, half_wire in enumerate(node[0]): - + _logger.debug(f"Exploring {half_wire} .....") cycle_index, qudit_index = half_wire[0] step = -1 if half_wire[1] == 'left' else 1 @@ -2269,8 +2296,9 @@ def score(node: Node) -> int: # Stop when exploring previously explored points point = CircuitPoint(cycle_index, qudit_index) - if point in node[3]: - break + # if point in node[3]: + # _logger.debug(f"Skipping op {self[point]} because previously seen.") + # break node[3].add(point) # Continue until next operation @@ -2299,6 +2327,7 @@ def score(node: Node) -> int: break # Otherwise branch on the operation + _logger.debug(f"Adding {(i, cycle_index, op)} to branch") branches.add((i, cycle_index, op)) # Track state of half wire right before branch @@ -2308,7 +2337,7 @@ def score(node: Node) -> int: # Compute children and extend frontier for half_wire_index, cycle_index, op in branches: - + _logger.debug(f"Expanding branch {(half_wire_index, cycle_index, op)}") child_half_wires = [ half_wire for i, half_wire in before_branch_half_wires.items() @@ -2330,6 +2359,7 @@ def score(node: Node) -> int: expansion = left_expansion + right_expansion # Branch/Gate not taken + _logger.debug(f"Branch/gate not taken: {(child_half_wires, node[1] | absorbed_gates, node[2], node[3])}") frontier.append(( child_half_wires, node[1] | absorbed_gates, @@ -2339,6 +2369,8 @@ def score(node: Node) -> int: # Branch/Gate taken op_points = {CircuitPoint(cycle_index, q) for q in op.location} + _logger.debug( + f"Branch/Gate taken: {(list(set(child_half_wires + expansion)), node[1] | absorbed_gates | {(cycle_index, op)}, node[2].union(op.location), node[3] | op_points)}") frontier.append(( list(set(child_half_wires + expansion)), node[1] | absorbed_gates | {(cycle_index, op)}, @@ -2349,6 +2381,7 @@ def score(node: Node) -> int: # Append terminal node to handle absorbed gates with no branches if len(node[1] | absorbed_gates) != len(node[1]): frontier.append(([], node[1] | absorbed_gates, *node[2:])) + _logger.debug(f"Terminal node {frontier[-1]}") return best_region From e72a6e46de88d8e0441fb514f9e4da3970adaead Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 9 Jul 2024 10:37:06 -0400 Subject: [PATCH 070/197] Implemented Communicate Feature --- bqskit/runtime/detached.py | 3 +++ bqskit/runtime/manager.py | 3 +++ bqskit/runtime/message.py | 1 + bqskit/runtime/task.py | 3 +++ bqskit/runtime/worker.py | 30 ++++++++++++++++++++++++++++++ 5 files changed, 40 insertions(+) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 90ad2b964..8740c7170 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -193,6 +193,9 @@ def handle_message( task_diff = cast(int, payload) self.conn_to_employee_dict[conn].num_tasks += task_diff + elif msg == RuntimeMessage.COMMUNICATE: + self.broadcast(msg, payload) + else: raise RuntimeError(f'Unexpected message type: {msg.name}') diff --git a/bqskit/runtime/manager.py b/bqskit/runtime/manager.py index 0edd64f8f..14827af9e 100644 --- 
a/bqskit/runtime/manager.py +++ b/bqskit/runtime/manager.py @@ -175,6 +175,9 @@ def handle_message( paths = cast(List[str], payload) self.handle_importpath(paths) + elif msg == RuntimeMessage.COMMUNICATE: + self.broadcast(RuntimeMessage.COMMUNICATE, payload) + else: raise RuntimeError(f'Unexpected message type: {msg.name}') diff --git a/bqskit/runtime/message.py b/bqskit/runtime/message.py index c975099c8..d2585aef2 100644 --- a/bqskit/runtime/message.py +++ b/bqskit/runtime/message.py @@ -22,3 +22,4 @@ class RuntimeMessage(IntEnum): UPDATE = 13 IMPORTPATH = 14 READY = 15 + COMMUNICATE = 16 diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 3d81f1bbd..329e48bd9 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -80,6 +80,9 @@ def __init__( """Set to true if this task should wake immediately on a result.""" self.log_context: dict[str, str] = log_context + """Additional context to be logged with this task.""" + + self.msg_buffer: list[Any] = [] @property def fnargs(self) -> tuple[Any, Any, Any]: diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 82cf1ab73..41779c99a 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -302,6 +302,10 @@ def recv_incoming(self) -> None: self._handle_cancel(addr) # TODO: preempt? + elif msg == RuntimeMessage.COMMUNICATE: + addrs, msg = cast(tuple[list[RuntimeAddress], Any], payload) + self._handle_communicate(addrs, msg) + elif msg == RuntimeMessage.IMPORTPATH: paths = cast(List[str], payload) for path in paths: @@ -370,6 +374,13 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] + def _handle_communicate(self, addrs: list[RuntimeAddress], msg: Any) -> None: + for task_addr in addrs: + if task_addr not in self._tasks: + continue + + self._tasks[task_addr].msg_buffer.append(msg) + def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise block.""" while True: @@ -657,6 +668,25 @@ def map( # Return future pointing to the mailbox return RuntimeFuture(mailbox_id) + def communicate(self, future: RuntimeFuture, msg: Any) -> None: + """Send a message to the task associated with `future`.""" + assert self._active_task is not None + assert future.mailbox_id in self._mailboxes + + num_slots = self._mailboxes[future.mailbox_id].expected_num_results + addrs = [ + RuntimeAddress(self._id, future.mailbox_id, slot_id) + for slot_id in range(num_slots) + ] + self._conn.send((RuntimeMessage.COMMUNICATE, (addrs, msg))) + + def get_messages(self) -> list[Any]: + """Return all messages received by the worker for this task.""" + assert self._active_task is not None + x = self._active_task.msg_buffer + self._active_task.msg_buffer = [] + return x + def cancel(self, future: RuntimeFuture) -> None: """Cancel all tasks associated with `future`.""" assert self._active_task is not None From 590f5ceb39e3991915ec246749081374724528b8 Mon Sep 17 00:00:00 2001 From: baobach Date: Wed, 10 Jul 2024 13:10:58 -0400 Subject: [PATCH 071/197] update surround when considering equal best score --- bqskit/ir/circuit.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 876183f18..641969aa5 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2222,6 +2222,7 @@ def score(node: Node) -> int: # Exhaustive Search while len(frontier) > 0: #_logger.debug(f'Current frontiers:{frontier}') + best_score_flag = False node = frontier.pop(0) _logger.debug('popped 
node:') _logger.debug(node[0]) @@ -2247,6 +2248,9 @@ def score(node: Node) -> int: except ValueError: if fail_quickly: continue + elif score(node) == best_score: + _logger.debug(f'current score is {score(node)} and is equal to the best score.') + best_score_flag = True # Bao's comment: The reason why I considering the node where the score is at least the same with # the best score is due to the need to expand the region to at least the number of qubits that we required # Why this works: as the filtering now limit the region to what we want, the node that we assuming should @@ -2296,10 +2300,12 @@ def score(node: Node) -> int: # Stop when exploring previously explored points point = CircuitPoint(cycle_index, qudit_index) - # if point in node[3]: - # _logger.debug(f"Skipping op {self[point]} because previously seen.") - # break - node[3].add(point) + if point in node[3]: + if not best_score_flag: + _logger.debug(f"Skipping op {self[point]} because previously seen.") + break + else: + node[3].add(point) # Continue until next operation if self.is_point_idle(point): From a61ef1435374393dd2bf4ebc73d63065bd56114d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 10 Jul 2024 21:07:47 -0400 Subject: [PATCH 072/197] Update --- bqskit/ir/circuit.py | 434 ++++++++++-------------- bqskit/ir/iterator.py | 20 +- bqskit/ir/region.py | 9 + tests/ir/circuit/test_region_methods.py | 52 +++ 4 files changed, 254 insertions(+), 261 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 641969aa5..3d5befb1c 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -902,12 +902,38 @@ def first_on(self, qudit: int) -> CircuitPoint | None: """Report the point for the first operation on `qudit` if it exists.""" return self._front[qudit] - def next(self, point: CircuitPoint) -> set[CircuitPoint]: + def next(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: """Return the points of operations dependent on the one at `point`.""" + if CircuitRegion.is_region(point): + points = [] + for cyc_op in self.operations_with_cycles(qudits_or_region=point): + points.append((cyc_op[0], cyc_op[1].location[0])) + + next_points = set() + for p in points: + for next in self.next(p): + if next not in points: + next_points.add(next) + + return next_points + return {p for p in self._dag[point][1].values() if p is not None} - def prev(self, point: CircuitPoint) -> set[CircuitPoint]: + def prev(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: """Return the points of operations the one at `point` depends on.""" + if CircuitRegion.is_region(point): + points = [] + for cyc_op in self.operations_with_cycles(qudits_or_region=point): + points.append((cyc_op[0], cyc_op[1].location[0])) + + prev_points = set() + for p in points: + for prev in self.prev(p): + if prev not in points: + prev_points.add(prev) + + return prev_points + return {p for p in self._dag[point][0].values() if p is not None} # endregion @@ -1853,6 +1879,10 @@ def check_region( """ Check `region` to be a valid in the context of this circuit. + A CircuitRegion is valid if it is within the bounds of the circuit + and for every pair of operations in the region, there is no path + between them that exits the region. + Args: region (CircuitRegionLike): The region to check. 
@@ -1883,35 +1913,47 @@ def check_region( f"but region's maximum cycle is {region.max_cycle}.", ) - for qudit_index, cycle_intervals in region.items(): - for other_qudit_index, other_cycle_intervals in region.items(): - if cycle_intervals.overlaps(other_cycle_intervals): + if strict: + for qudit_index, cycle_intervals in region.items(): + for other_qudit_index, other_cycle_intervals in region.items(): + if not cycle_intervals.overlaps(other_cycle_intervals): + raise ValueError('Disconnect detected in region.') + + cycles_ops = self.operations_with_cycles(qudits_or_region=region, exclude=True) + points = [(cop[0], cop[1].location[0]) for cop in cycles_ops] + known_to_never_reenter = set() + + # Walk back from max cycle + for pt in sorted(points, key=lambda x: x[0], reverse=True): + + # Max cycle is valid in base case + if pt[0] == region.max_cycle: + continue + + frontier = self.next(pt) + while frontier: + pt2 = frontier.pop() + + # Walk only paths that exit the region + if pt2 in points: continue - involved_qudits = {qudit_index} - min_index = min( - cycle_intervals.upper, - other_cycle_intervals.upper, - ) - max_index = max( - cycle_intervals.lower, - other_cycle_intervals.lower, - ) - for cycle_index in range(min_index + 1, max_index): - try: - ops = self[cycle_index, involved_qudits] - except IndexError: - continue - if strict: - raise ValueError('Disconnect detected in region.') + # Stop walking after the max cycle + if pt2[0] >= region.max_cycle: + continue - if any(other_qudit_index in op.location for op in ops): - raise ValueError( - 'Disconnected region has excluded gate in middle.', - ) + # Skip this point if previously determined it to be good + if pt2 in known_to_never_reenter: + continue + + expansion = self.next(pt2) - for op in ops: - involved_qudits.update(op.location) + # If there is a path that re-enters the region, fail + if any(p in points for p in expansion): + raise ValueError('Disconnect detected in region.') + + frontier.update(expansion) + known_to_never_reenter.add(pt2) def straighten( self, @@ -2092,34 +2134,46 @@ def unfold_all(self) -> None: def surround( self, - point: CircuitPointLike, + point: CircuitPointLike | CircuitRegionLike, num_qudits: int, bounding_region: CircuitRegionLike | None = None, - fail_quickly: bool = False, - filter: Callable[[Circuit.surround.Node], bool] | None = None, + fail_quickly: bool | None = None, + filter: Callable[[CircuitRegion], bool] | None = None, + scoring_fn: Callable[[CircuitRegion], float] | None = None, ) -> CircuitRegion: """ Retrieve the maximal connected region in this circuit with `point`. Args: - point (CircuitPointLike): Find a surrounding region for this - point. This point will be in the final CircuitRegion. + point (CircuitPointLike | CircuitRegionLike): Find a surrounding + region for this point (or region). This point (or region) + will be in the final CircuitRegion. - num_qudits (int): The number of qudits to include in the region. + num_qudits (int): The maximum number of qudits to include in + the surrounding region. bounding_region (CircuitRegionLike | None): An optional region that bounds the resulting region. - fail_quickly (bool): If set to true, will not branch on + fail_quickly (bool | None): If set to true, will not branch on an invalid region. This will lead to a much faster result in some cases at the cost of only approximating - the maximal region. - - filter (Callable[[], bool] | None): The filter function determines - if a candidate region is valid. 
This is used to prune the - search space of the surround function. If None then no - filtering is done. It takes a surround search node and - returns a boolean. See the code for more type information. + the maximal region. (Deprecated, does nothing now besides + print a warning if a bool.) + + filter (Callable[[CircuitRegion], bool] | None): The filter + function determines if a candidate region is valid in the + caller's context. This is used to prune the search space + of the surround function. If None, then no filtering is + done. It takes a CircuitRegion and returns a + boolean. + + scoring_fn (Callable[[CircuitRegion], float] | None): The + scoring function determines the "best" surrounding region. + If left as None, then this will default to the region with + the most number of gates with larger gates worth more. + It takes a CircuitRegion and returns a float. Larger scores + are better. Raises: IndexError: If `point` is not a valid index. @@ -2155,239 +2209,117 @@ def surround( if filter is not None and not callable(filter): raise TypeError(f'Expected callable filter, got {type(filter)}.') - if bounding_region is not None: - bounding_region = CircuitRegion(bounding_region) - - point = self.normalize_point(point) - - init_op: Operation = self[point] # Allow starting at an idle point - - if init_op.num_qudits > num_qudits: - raise ValueError('Gate at point is too large for num_qudits.') + def default_scoring_fn(region: CircuitRegion) -> float: + return float(sum(op.num_qudits for op in self[region])) - HalfWire = Tuple[CircuitPoint, str] - """ - A HalfWire is a point in the circuit and a direction. + if scoring_fn is None: + scoring_fn = default_scoring_fn - This represents a point to start exploring from and a direction to - explore in. The direction is either 'left' or 'right'. - """ + if not callable(scoring_fn): + raise TypeError( + f'Expected callable scoring_fn, got {type(scoring_fn)}.', + ) - Node = Tuple[ - List[HalfWire], - Set[Tuple[int, Operation]], - CircuitLocation, - Set[CircuitPoint], - ] - """ - A Node in the search tree. + if fail_quickly is not None: + warnings.warn( + 'The fail_quickly argument is deprecated and does nothing. ' + 'Surround will always attempt to find the maximal region. ' + 'This argument will be removed in a future release and this ' + 'warning will become an error.', + DeprecationWarning, + ) - Each node represents a region that may grow further. The data structure - tracks all HalfWires in the region and the set of operations inside the - region. During node exploration each HalfWire is walked until we find a - multi-qudit gate. Multi- qudit gates form branches in the tree on - whether on the gate should be included. The node structure additionally - stores the set of qudit indices involved in the region currently. Also, - we track points that have already been explored to reduce repetition. 
- """ + if bounding_region is not None: + bounding_region = CircuitRegion(bounding_region) - # Initialize the frontier - init_node = ( - [ - (CircuitPoint(point[0], qudit_index), 'left') - for qudit_index in init_op.location - ] - + [ - (CircuitPoint(point[0], qudit_index), 'right') - for qudit_index in init_op.location - ], - {(point[0], init_op)}, - init_op.location, - {CircuitPoint(point[0], q) for q in init_op.location}, - ) + if CircuitPoint.is_point(point): + if self.is_point_idle(point): + init_region = CircuitRegion({point[1]: (point[0], point[0])}) + else: + init_region = self.get_region([point]) + elif CircuitRegion.is_region(point): + init_region = CircuitRegion(point) + else: + raise TypeError( + f'Expected CircuitPoint or CircuitRegion, got {type(point)}.', + ) - if filter is not None and not filter(init_node): - raise ValueError('Initial node does not pass filter.') + if init_region.num_qudits > num_qudits: + raise ValueError('Initial region is too large for num_qudits.') - frontier: list[Node] = [init_node] + if filter is not None and not filter(init_region): + raise ValueError('Initial region does not pass filter.') + # Initialize Search + frontier: list[CircuitRegion] = [init_region] + seen: set[CircuitRegion] = set() + # Track best so far - def score(node: Node) -> int: - return sum(op[1].num_qudits for op in node[1]) - - best_score = score(init_node) - _logger.debug(f'best_score: {best_score}') - best_region = self.get_region({(point[0], init_op.location[0])}) + best_score = (scoring_fn(init_region), init_region.num_qudits) + best_region = init_region # Exhaustive Search while len(frontier) > 0: - #_logger.debug(f'Current frontiers:{frontier}') - best_score_flag = False node = frontier.pop(0) - _logger.debug('popped node:') - _logger.debug(node[0]) - _logger.debug(f'Items remaining in the frontier: {len(frontier)}') - - if filter is not None and not filter(node): - _logger.debug('Node failed filter; skipping.') - _logger.debug(f'Node failed location: {node[2]}') - continue # Evaluate node - _logger.debug(f'current_score: {score(node)}') - if score(node) > best_score: # or (score(node) == best_score and len(node[2]) > len(best_region)): - # Calculate region from best node and return - points = {(cycle, op.location[0]) for cycle, op in node[1]} - - try: - best_region = self.get_region(points) - best_score = score(node) - _logger.debug(f'new best: {best_region}.') - - # Need to reject bad regions - except ValueError: - if fail_quickly: - continue - elif score(node) == best_score: - _logger.debug(f'current score is {score(node)} and is equal to the best score.') - best_score_flag = True - # Bao's comment: The reason why I considering the node where the score is at least the same with - # the best score is due to the need to expand the region to at least the number of qubits that we required - # Why this works: as the filtering now limit the region to what we want, the node that we assuming should - # satisfy the filtering and bounded by the region that we want. Therefore, by using union to take into account - # all regions, we hope to cover the amount of qubits that we want - - # elif score(node) == best_score and len(best_region) < num_qudits: - # # Calculate region from best node and return - # points = {(cycle, op.location[0]) for cycle, op in node[1]} - # try: - # new_region = self.get_region(points) - # _logger.debug(f'new region: {new_region}') - # # If two region is different, merge them. 
As the region is bounded, we do not have the case where two - # # regions has no overlap - # if new_region.location != best_region.location: - # best_region = best_region.union(new_region) - # best_score = score(node) - # _logger.debug(f'new best: {best_region}.') - # - # # Need to reject bad regions - # except ValueError: - # if fail_quickly: - # continue + node_score = (scoring_fn(node), node.num_qudits) + if node_score > best_score: + best_region = node + best_score = node_score # Expand node - absorbed_gates: set[tuple[int, Operation]] = set() - branches: set[tuple[int, int, Operation]] = set() - before_branch_half_wires: dict[int, HalfWire] = {} - for i, half_wire in enumerate(node[0]): - _logger.debug(f"Exploring {half_wire} .....") - cycle_index, qudit_index = half_wire[0] - step = -1 if half_wire[1] == 'left' else 1 - - while True: - - # Take a step - cycle_index += step - - # Stop at edges - if cycle_index < 0 or cycle_index >= self.num_cycles: - break - - # Stop when outside bounds - if bounding_region is not None: - if (cycle_index, qudit_index) not in bounding_region: - break - - # Stop when exploring previously explored points - point = CircuitPoint(cycle_index, qudit_index) - if point in node[3]: - if not best_score_flag: - _logger.debug(f"Skipping op {self[point]} because previously seen.") - break + for point in self.next(node).union(self.prev(node)): + # Create new region by adding the gate at this point + new_region = {k: v for k, v in node.items()} + op = self[point] + for qudit in op.location: + if qudit not in new_region: + new_region[qudit] = (point[0], point[0]) else: - node[3].add(point) - - # Continue until next operation - if self.is_point_idle(point): - continue - op: Operation = self[cycle_index, qudit_index] - - # Gates already in region stop the half_wire - if (cycle_index, op) in node[1]: - break - - # Gates already accounted for stop the half_wire - if (cycle_index, op) in absorbed_gates: - break - - if (cycle_index, op) in [(c, o) for h, c, o in branches]: - break + new_region[qudit] = ( + min(new_region[qudit][0], point[0]), + max(new_region[qudit][1], point[0]), + ) - # Absorb single-qudit gates - if len(op.location) == 1: - absorbed_gates.add((cycle_index, op)) - continue + # Discard too large regions + if len(new_region) > num_qudits: + continue - # Operations that are too large stop the half_wire - if len(op.location.union(node[2])) > num_qudits: - break + # Discard invalid regions + if not self.is_valid_region(new_region): + continue - # Otherwise branch on the operation - _logger.debug(f"Adding {(i, cycle_index, op)} to branch") - branches.add((i, cycle_index, op)) + # # Absorb single-qudit gates + # while True: + # new_exp = self.next(new_region).union(self.prev(new_region)) + # if not any(op.num_qudits == 1 for op in self[new_exp]): + # break + # for point in new_exp: + # op = self[point] + # if len(op.location) == 1: + # qudit = op.location[0] + # if qudit not in new_region: + # new_region[qudit] = (point[0], point[0]) + # else: + # new_region[qudit] = ( + # min(new_region[qudit][0], point[0]), + # max(new_region[qudit][1], point[0]), + # ) + + new_region = CircuitRegion(new_region) + + # Check uniqueness + if new_region in seen: + continue - # Track state of half wire right before branch - prev_point = CircuitPoint(cycle_index - step, qudit_index) - before_branch_half_wires[i] = (prev_point, half_wire[1]) - break + # Check filter + if filter is not None and not filter(new_region): + continue - # Compute children and extend frontier - for 
half_wire_index, cycle_index, op in branches: - _logger.debug(f"Expanding branch {(half_wire_index, cycle_index, op)}") - child_half_wires = [ - half_wire - for i, half_wire in before_branch_half_wires.items() - if half_wire_index != i - ] - - qudit = node[0][half_wire_index][0].qudit - direction = node[0][half_wire_index][1] - left_expansion = [ - (CircuitPoint(cycle_index, qudit_index), 'left') - for qudit_index in op.location - if qudit != qudit_index or direction == 'left' - ] - right_expansion = [ - (CircuitPoint(cycle_index, qudit_index), 'right') - for qudit_index in op.location - if qudit != qudit_index or direction == 'right' - ] - expansion = left_expansion + right_expansion - - # Branch/Gate not taken - _logger.debug(f"Branch/gate not taken: {(child_half_wires, node[1] | absorbed_gates, node[2], node[3])}") - frontier.append(( - child_half_wires, - node[1] | absorbed_gates, - node[2], - node[3], - )) - - # Branch/Gate taken - op_points = {CircuitPoint(cycle_index, q) for q in op.location} - _logger.debug( - f"Branch/Gate taken: {(list(set(child_half_wires + expansion)), node[1] | absorbed_gates | {(cycle_index, op)}, node[2].union(op.location), node[3] | op_points)}") - frontier.append(( - list(set(child_half_wires + expansion)), - node[1] | absorbed_gates | {(cycle_index, op)}, - node[2].union(op.location), - node[3] | op_points, - )) - - # Append terminal node to handle absorbed gates with no branches - if len(node[1] | absorbed_gates) != len(node[1]): - frontier.append(([], node[1] | absorbed_gates, *node[2:])) - _logger.debug(f"Terminal node {frontier[-1]}") + # Expand frontier + frontier.append(new_region) + seen.add(new_region) return best_region diff --git a/bqskit/ir/iterator.py b/bqskit/ir/iterator.py index 87e9a3bd2..fa1813c69 100644 --- a/bqskit/ir/iterator.py +++ b/bqskit/ir/iterator.py @@ -248,18 +248,18 @@ def __init__( self.min_cycle = self.region.min_cycle self.max_cycle = self.region.max_cycle - if start < (self.min_cycle, self.min_qudit): - start = CircuitPoint(self.min_cycle, self.min_qudit) + if self.start < (self.min_cycle, self.min_qudit): + self.start = CircuitPoint(self.min_cycle, self.min_qudit) - if end > (self.max_cycle, self.max_qudit): - end = CircuitPoint(self.max_cycle, self.max_qudit) + if self.end > (self.max_cycle, self.max_qudit): + self.end = CircuitPoint(self.max_cycle, self.max_qudit) - assert isinstance(start, CircuitPoint) # TODO: Typeguard - assert isinstance(end, CircuitPoint) # TODO: Typeguard + assert isinstance(self.start, CircuitPoint) # TODO: Typeguard + assert isinstance(self.end, CircuitPoint) # TODO: Typeguard # Pointer into the circuit structure - self.cycle = start.cycle if not self.reverse else end.cycle - self.qudit = start.qudit if not self.reverse else end.qudit + self.cycle = self.start.cycle if not self.reverse else self.end.cycle + self.qudit = self.start.qudit if not self.reverse else self.end.qudit # Used to track changes to circuit structure self.num_ops = self.circuit.num_operations @@ -330,6 +330,8 @@ def __next__(self) -> Operation | tuple[int, Operation]: self.qudits_to_skip.add(self.qudit) continue + self.qudits_to_skip.update(op.location) + if self.exclude: if not all(qudit in self.qudits for qudit in op.location): continue @@ -340,8 +342,6 @@ def __next__(self) -> Operation | tuple[int, Operation]: ): continue - self.qudits_to_skip.update(op.location) - if self.and_cycles: return self.cycle, op diff --git a/bqskit/ir/region.py b/bqskit/ir/region.py index 78197e973..0d5cf9432 100644 --- a/bqskit/ir/region.py +++ 
b/bqskit/ir/region.py @@ -187,6 +187,11 @@ def empty(self) -> bool: """Return true if this region is empty.""" return len(self) == 0 + @property + def num_qudits(self) -> int: + """Return the number of qudits in this region.""" + return len(self) + def shift_left(self, amount_to_shift: int) -> CircuitRegion: """ Shift the region to the left by `amount_to_shift`. @@ -292,6 +297,10 @@ def overlaps(self, other: CircuitPointLike | CircuitRegionLike) -> bool: % type(other), ) + def copy(self) -> CircuitRegion: + """Return a deep copy of this region.""" + return CircuitRegion(self._intervals) + def __contains__(self, other: object) -> bool: if is_integer(other): return other in self._intervals.keys() diff --git a/tests/ir/circuit/test_region_methods.py b/tests/ir/circuit/test_region_methods.py index 18c34c86c..59ca4c954 100644 --- a/tests/ir/circuit/test_region_methods.py +++ b/tests/ir/circuit/test_region_methods.py @@ -16,6 +16,7 @@ from bqskit.ir.point import CircuitPoint from bqskit.ir.point import CircuitPointLike from bqskit.ir.region import CircuitRegion +from bqskit.ir.location import CircuitLocation def check_no_idle_cycles(circuit: Circuit) -> None: @@ -338,3 +339,54 @@ def test_with_fold(self, r6_qudit_circuit: Circuit) -> None: region = r6_qudit_circuit.surround((cycle, qudit), 4) r6_qudit_circuit.fold(region) assert r6_qudit_circuit.get_unitary() == utry + + def test_surround_symmetric(self): + circuit = Circuit(6) + # whole wall of even + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [2, 3]) + circuit.append_gate(CNOTGate(), [4, 5]) + + # one odd gate; problematic point in test + circuit.append_gate(CNOTGate(), [3, 4]) + + # whole wall of even + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [2, 3]) + circuit.append_gate(CNOTGate(), [4, 5]) + + region = circuit.surround((1, 3), 4) + assert region.location == CircuitLocation([2, 3, 4, 5]) + + def test_surround_filter_hard(self): + circuit = Circuit(7) + # whole wall of even + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [2, 3]) + circuit.append_gate(CNOTGate(), [4, 5]) + + # one odd gate; problematic point in test + circuit.append_gate(CNOTGate(), [3, 4]) + + # whole wall of even + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [2, 3]) + circuit.append_gate(CNOTGate(), [4, 5]) + + # more odd gates to really test filter + circuit.append_gate(CNOTGate(), [5, 6]) + circuit.append_gate(CNOTGate(), [5, 6]) + circuit.append_gate(CNOTGate(), [5, 6]) + circuit.append_gate(CNOTGate(), [5, 6]) + circuit.append_gate(CNOTGate(), [5, 6]) + + region = circuit.surround((1, 3), 4, None, None, lambda region: (region.min_qudit > 1 and region.max_qudit < 6)) + assert region.location == CircuitLocation([2, 3, 4, 5]) + +def test_check_region() -> None: + c = Circuit(4) + c.append_gate(CNOTGate(), [1, 2]) + c.append_gate(CNOTGate(), [0, 1]) + c.append_gate(CNOTGate(), [2, 3]) + c.append_gate(CNOTGate(), [1, 2]) + assert not c.is_valid_region({1:(0, 2), 2:(0, 2), 3:(0, 2)}) From fe806335bdff37f25dcb5864aac84e7ee73ab30b Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 10 Jul 2024 21:08:20 -0400 Subject: [PATCH 073/197] Removed unused code --- bqskit/ir/circuit.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 3d5befb1c..22bbb3356 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2289,23 +2289,6 @@ def default_scoring_fn(region: CircuitRegion) -> float: # 
Discard invalid regions if not self.is_valid_region(new_region): continue - - # # Absorb single-qudit gates - # while True: - # new_exp = self.next(new_region).union(self.prev(new_region)) - # if not any(op.num_qudits == 1 for op in self[new_exp]): - # break - # for point in new_exp: - # op = self[point] - # if len(op.location) == 1: - # qudit = op.location[0] - # if qudit not in new_region: - # new_region[qudit] = (point[0], point[0]) - # else: - # new_region[qudit] = ( - # min(new_region[qudit][0], point[0]), - # max(new_region[qudit][1], point[0]), - # ) new_region = CircuitRegion(new_region) From c71998d22c8ef96593d6d960ca13605b2fa10af6 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 10 Jul 2024 21:35:31 -0400 Subject: [PATCH 074/197] Pre-commit --- bqskit/ir/circuit.py | 55 ++++++++++++++-------- bqskit/ir/gates/parameterized/u8.py | 16 +++---- bqskit/passes/mapping/placement/trivial.py | 2 +- tests/ir/circuit/test_region_methods.py | 15 ++++-- 4 files changed, 55 insertions(+), 33 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 22bbb3356..35aefdc15 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -5,17 +5,15 @@ import logging import warnings from typing import Any +from typing import Callable from typing import cast from typing import Collection from typing import Dict from typing import Iterable from typing import Iterator -from typing import List from typing import Optional from typing import overload from typing import Sequence -from typing import Set -from typing import Tuple from typing import TYPE_CHECKING import numpy as np @@ -25,6 +23,7 @@ from bqskit.ir.gates.circuitgate import CircuitGate from bqskit.ir.gates.constant.unitary import ConstantUnitaryGate from bqskit.ir.gates.measure import MeasurementPlaceholder +from bqskit.ir.interval import CycleInterval from bqskit.ir.iterator import CircuitIterator from bqskit.ir.lang import get_language from bqskit.ir.location import CircuitLocation @@ -902,7 +901,10 @@ def first_on(self, qudit: int) -> CircuitPoint | None: """Report the point for the first operation on `qudit` if it exists.""" return self._front[qudit] - def next(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: + def next( + self, + point: CircuitPointLike | CircuitRegionLike, + ) -> set[CircuitPoint]: """Return the points of operations dependent on the one at `point`.""" if CircuitRegion.is_region(point): points = [] @@ -917,9 +919,16 @@ def next(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: return next_points - return {p for p in self._dag[point][1].values() if p is not None} + return { + p + for p in self._dag[point][1].values() # type: ignore + if p is not None + } - def prev(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: + def prev( + self, + point: CircuitPointLike | CircuitRegionLike, + ) -> set[CircuitPoint]: """Return the points of operations the one at `point` depends on.""" if CircuitRegion.is_region(point): points = [] @@ -934,7 +943,11 @@ def prev(self, point: CircuitPoint | CircuitRegionLike) -> set[CircuitPoint]: return prev_points - return {p for p in self._dag[point][0].values() if p is not None} + return { + p + for p in self._dag[point][0].values() # type: ignore + if p is not None + } # endregion @@ -1919,7 +1932,9 @@ def check_region( if not cycle_intervals.overlaps(other_cycle_intervals): raise ValueError('Disconnect detected in region.') - cycles_ops = self.operations_with_cycles(qudits_or_region=region, exclude=True) + cycles_ops = 
self.operations_with_cycles( + qudits_or_region=region, exclude=True, + ) points = [(cop[0], cop[1].location[0]) for cop in cycles_ops] known_to_never_reenter = set() @@ -2253,7 +2268,7 @@ def default_scoring_fn(region: CircuitRegion) -> float: # Initialize Search frontier: list[CircuitRegion] = [init_region] seen: set[CircuitRegion] = set() - + # Track best so far best_score = (scoring_fn(init_region), init_region.num_qudits) best_region = init_region @@ -2271,26 +2286,28 @@ def default_scoring_fn(region: CircuitRegion) -> float: # Expand node for point in self.next(node).union(self.prev(node)): # Create new region by adding the gate at this point - new_region = {k: v for k, v in node.items()} + new_region_bldr = {k: v for k, v in node.items()} op = self[point] for qudit in op.location: - if qudit not in new_region: - new_region[qudit] = (point[0], point[0]) + if qudit not in new_region_bldr: + new_region_bldr[qudit] = CycleInterval( + point[0], point[0], + ) else: - new_region[qudit] = ( - min(new_region[qudit][0], point[0]), - max(new_region[qudit][1], point[0]), + new_region_bldr[qudit] = CycleInterval( + min(new_region_bldr[qudit][0], point[0]), + max(new_region_bldr[qudit][1], point[0]), ) # Discard too large regions - if len(new_region) > num_qudits: + if len(new_region_bldr) > num_qudits: continue # Discard invalid regions - if not self.is_valid_region(new_region): + if not self.is_valid_region(new_region_bldr): continue - - new_region = CircuitRegion(new_region) + + new_region = CircuitRegion(new_region_bldr) # Check uniqueness if new_region in seen: diff --git a/bqskit/ir/gates/parameterized/u8.py b/bqskit/ir/gates/parameterized/u8.py index 85808f713..ce4c3337e 100644 --- a/bqskit/ir/gates/parameterized/u8.py +++ b/bqskit/ir/gates/parameterized/u8.py @@ -91,12 +91,12 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [ # wrt params[0] [-s1 * c2 * p1, c1 * p3, -s1 * s2 * p4], [ - -c1 * c2 * c3 * p1 * p2 * m3, -s1 * \ - c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4, + -c1 * c2 * c3 * p1 * p2 * m3, -s1 + * c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4, ], [ - -c1 * c2 * s3 * p1 * m3 * p5, -s1 * \ - s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5, + -c1 * c2 * s3 * p1 * m3 * p5, -s1 + * s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5, ], ], @@ -142,8 +142,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [ # wrt params[4] [0, 0, 0], [ - -1j * s1 * c2 * c3 * p1 * p2 * m3, 1j * c1 * \ - c3 * p2, -1j * s1 * s2 * c3 * p2 * m3 * p4, + -1j * s1 * c2 * c3 * p1 * p2 * m3, 1j * c1 + * c3 * p2, -1j * s1 * s2 * c3 * p2 * m3 * p4, ], [1j * s2 * c3 * m2 * m4, 0, -1j * c2 * c3 * m1 * m2], ], @@ -178,8 +178,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [0, 0, 0], [-1j * s2 * s3 * m4 * m5, 0, 1j * c2 * s3 * m1 * m5], [ - -1j * s1 * c2 * s3 * p1 * m3 * p5, 1j * c1 * \ - s3 * p5, -1j * s1 * s2 * s3 * m3 * p4 * p5, + -1j * s1 * c2 * s3 * p1 * m3 * p5, 1j * c1 + * s3 * p5, -1j * s1 * s2 * s3 * m3 * p4 * p5, ], ], ], diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index df54d94e5..95ee91d3b 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data["placement"]}') + _logger.info(f'Placed qudits on {data['placement']}') # Raise an error if this is not a 
valid placement sg = model.coupling_graph.get_subgraph(data['placement']) diff --git a/tests/ir/circuit/test_region_methods.py b/tests/ir/circuit/test_region_methods.py index 59ca4c954..cd821f53b 100644 --- a/tests/ir/circuit/test_region_methods.py +++ b/tests/ir/circuit/test_region_methods.py @@ -13,10 +13,10 @@ from bqskit.ir.gates.constant.h import HGate from bqskit.ir.gates.constant.x import XGate from bqskit.ir.gates.parameterized.u3 import U3Gate +from bqskit.ir.location import CircuitLocation from bqskit.ir.point import CircuitPoint from bqskit.ir.point import CircuitPointLike from bqskit.ir.region import CircuitRegion -from bqskit.ir.location import CircuitLocation def check_no_idle_cycles(circuit: Circuit) -> None: @@ -340,7 +340,7 @@ def test_with_fold(self, r6_qudit_circuit: Circuit) -> None: r6_qudit_circuit.fold(region) assert r6_qudit_circuit.get_unitary() == utry - def test_surround_symmetric(self): + def test_surround_symmetric(self) -> None: circuit = Circuit(6) # whole wall of even circuit.append_gate(CNOTGate(), [0, 1]) @@ -358,7 +358,7 @@ def test_surround_symmetric(self): region = circuit.surround((1, 3), 4) assert region.location == CircuitLocation([2, 3, 4, 5]) - def test_surround_filter_hard(self): + def test_surround_filter_hard(self) -> None: circuit = Circuit(7) # whole wall of even circuit.append_gate(CNOTGate(), [0, 1]) @@ -380,13 +380,18 @@ def test_surround_filter_hard(self): circuit.append_gate(CNOTGate(), [5, 6]) circuit.append_gate(CNOTGate(), [5, 6]) - region = circuit.surround((1, 3), 4, None, None, lambda region: (region.min_qudit > 1 and region.max_qudit < 6)) + region = circuit.surround( + (1, 3), 4, None, None, lambda region: ( + region.min_qudit > 1 and region.max_qudit < 6 + ), + ) assert region.location == CircuitLocation([2, 3, 4, 5]) + def test_check_region() -> None: c = Circuit(4) c.append_gate(CNOTGate(), [1, 2]) c.append_gate(CNOTGate(), [0, 1]) c.append_gate(CNOTGate(), [2, 3]) c.append_gate(CNOTGate(), [1, 2]) - assert not c.is_valid_region({1:(0, 2), 2:(0, 2), 3:(0, 2)}) + assert not c.is_valid_region({1: (0, 2), 2: (0, 2), 3: (0, 2)}) From 7f75b12f16bfd2b2481b4c6dc6be2cd58e57533c Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 10 Jul 2024 21:38:53 -0400 Subject: [PATCH 075/197] Fixed a thing? 
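
A hedged usage sketch of the filtered `surround` pattern exercised by the tests above; the argument order simply mirrors those tests, and the three-CNOT circuit here is an illustration rather than one of the test fixtures.

from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CNOTGate

# Illustrative circuit; the surround call below mirrors the tests above.
circuit = Circuit(4)
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(CNOTGate(), [1, 2])
circuit.append_gate(CNOTGate(), [2, 3])

# Grow a region around the gate at cycle 1 on qudit 1, at most 3 qudits
# wide, but only accept candidate regions that stay off qudit 0.
region = circuit.surround(
    (1, 1), 3, None, None, lambda region: region.min_qudit > 0,
)
assert 0 not in region.location
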
--- bqskit/passes/mapping/placement/trivial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index 95ee91d3b..df54d94e5 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data['placement']}') + _logger.info(f'Placed qudits on {data["placement"]}') # Raise an error if this is not a valid placement sg = model.coupling_graph.get_subgraph(data['placement']) From 0dca1c50b7ec91e316d68ecf1ebb193c42d28d7e Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 17 Jul 2024 15:07:03 -0400 Subject: [PATCH 076/197] Performance Improvements --- bqskit/ir/circuit.py | 62 ++++++++++++++++++++----- tests/ir/circuit/test_region_methods.py | 4 +- 2 files changed, 53 insertions(+), 13 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 35aefdc15..7d6d06b2a 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2286,28 +2286,68 @@ def default_scoring_fn(region: CircuitRegion) -> float: # Expand node for point in self.next(node).union(self.prev(node)): # Create new region by adding the gate at this point - new_region_bldr = {k: v for k, v in node.items()} + region_bldr = {k: v for k, v in node.items()} op = self[point] + valid_region = True for qudit in op.location: - if qudit not in new_region_bldr: - new_region_bldr[qudit] = CycleInterval( - point[0], point[0], + if qudit not in region_bldr: + region_bldr[qudit] = CycleInterval(point[0], point[0]) + + elif point[0] < region_bldr[qudit][0]: + # Check for gates in the middle not in region + if any( + not self.is_point_idle((i, qudit)) + for i in range(point[0] + 1, region_bldr[qudit][0]) + ): + valid_region = False + break + + # Absorb Single-qudit gates + index = point[0] + while index > 0: + if not self.is_point_idle((index - 1, qudit)): + prev_op = self[index - 1, qudit] + if len(prev_op.location) != 1: + break + index -= 1 + + region_bldr[qudit] = CycleInterval( + index, + region_bldr[qudit][1], ) - else: - new_region_bldr[qudit] = CycleInterval( - min(new_region_bldr[qudit][0], point[0]), - max(new_region_bldr[qudit][1], point[0]), + + elif point[0] > region_bldr[qudit][1]: + # Check for gates in the middle not in region + if any( + not self.is_point_idle((i, qudit)) + for i in range(region_bldr[qudit][1] + 1, point[0]) + ): + valid_region = False + break + + # Absorb Single-qudit gates + index = point[0] + while index < self.num_cycles - 1: + if not self.is_point_idle((index + 1, qudit)): + next_op = self[index + 1, qudit] + if len(next_op.location) != 1: + break + index += 1 + + region_bldr[qudit] = CycleInterval( + region_bldr[qudit][0], + index, ) # Discard too large regions - if len(new_region_bldr) > num_qudits: + if len(region_bldr) > num_qudits: continue # Discard invalid regions - if not self.is_valid_region(new_region_bldr): + if not valid_region: continue - new_region = CircuitRegion(new_region_bldr) + new_region = CircuitRegion(region_bldr) # Check uniqueness if new_region in seen: diff --git a/tests/ir/circuit/test_region_methods.py b/tests/ir/circuit/test_region_methods.py index cd821f53b..5dfd50c19 100644 --- a/tests/ir/circuit/test_region_methods.py +++ b/tests/ir/circuit/test_region_methods.py @@ -297,7 +297,7 @@ def test_small_circuit_2(self) -> None: 
circuit.append_gate(HGate(), 1) circuit.append_gate(HGate(), 2) region = circuit.surround((0, 1), 2) - assert region == CircuitRegion({0: (0, 1), 1: (0, 2)}) + assert region == CircuitRegion({0: (0, 1), 1: (0, 3)}) def test_small_circuit_3(self) -> None: circuit = Circuit(3) @@ -325,7 +325,7 @@ def test_through_middle_of_outside(self) -> None: circuit.append_gate(CNOTGate(), (0, 2)) circuit.append_gate(CNOTGate(), (0, 1)) region = circuit.surround((1, 0), 2) - assert region == CircuitRegion({0: (0, 1), 1: (0, 1)}) + assert region == CircuitRegion({0: (0, 1), 1: (0, 2)}) def test_with_fold(self, r6_qudit_circuit: Circuit) -> None: cycle = 0 From e6c0dea7abd18cc95a7bea87f0b7b1721536655b Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Wed, 17 Jul 2024 17:42:31 -0400 Subject: [PATCH 077/197] Implemented __del__ to clean up hanging coroutines --- bqskit/runtime/task.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index b55037a87..8f4d36f32 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -110,3 +110,8 @@ async def run(self) -> Any: def is_descendant_of(self, addr: RuntimeAddress) -> bool: """Return true if `addr` identifies a parent (or this) task.""" return addr == self.return_address or addr in self.breadcrumbs + + def __del__(self) -> None: + """Close the coroutine to make sure it is not left hanging.""" + if self.coro is not None: + self.coro.close() From 3c8e17e71cc7467d65bd053f79c7043637535b27 Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Thu, 18 Jul 2024 11:39:13 -0400 Subject: [PATCH 078/197] added cancel to task instead of relying on del --- bqskit/runtime/task.py | 9 +++++---- bqskit/runtime/worker.py | 8 ++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 8f4d36f32..74e6e07e1 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -101,6 +101,11 @@ def start(self) -> None: """Initialize the task.""" self.coro = self.run() + def cancel(self) -> None: + """Ask the coroutine to gracefully exit.""" + if self.coro is not None: + self.coro.close() + async def run(self) -> Any: """Task coroutine wrapper.""" if inspect.iscoroutinefunction(self.fnargs[0]): @@ -111,7 +116,3 @@ def is_descendant_of(self, addr: RuntimeAddress) -> bool: """Return true if `addr` identifies a parent (or this) task.""" return addr == self.return_address or addr in self.breadcrumbs - def __del__(self) -> None: - """Close the coroutine to make sure it is not left hanging.""" - if self.coro is not None: - self.coro.close() diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 5c4b3ca18..8113bd419 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -315,6 +315,9 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: to discard cancelled tasks when popping from it. Therefore, we do not do anything with `self._ready_task_ids` here. + We also must make sure to call the `cancel` function of the + tasks to make sure their coroutines are cleaned up. 
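
A standalone Python illustration (no BQSKit imports) of the cleanup mechanism this docstring refers to: closing a suspended coroutine raises GeneratorExit inside it, which is what RuntimeTask.cancel() relies on so coroutines are not left hanging.

# Standalone illustration, not BQSKit runtime code.
class _Suspend:
    """A minimal awaitable that suspends the coroutine once."""

    def __await__(self):
        yield  # hand control back to whatever is driving the coroutine


async def work() -> None:
    try:
        await _Suspend()
    except GeneratorExit:
        print('coroutine unwound cleanly')
        raise


coro = work()
coro.send(None)  # start the coroutine; it suspends at the await
coro.close()     # like RuntimeTask.cancel(): GeneratorExit is raised inside
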
+ Also, we also don't need to send out cancel messages for cancelled children tasks since other workers can evaluate that for themselves using breadcrumbs and the original `addr` cancel @@ -327,12 +330,17 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if task.is_descendant_of(addr): for mailbox_id in self._tasks[key].owned_mailboxes: self._mailboxes.pop(mailbox_id) + task.cancel() self._tasks = { a: t for a, t in self._tasks.items() if not t.is_descendant_of(addr) } # Remove all tasks that are children of `addr` from delayed tasks + for task in self._delayed_tasks: + if task.is_descendant_of(addr): + task.cancel() + self._delayed_tasks = [ t for t in self._delayed_tasks if not t.is_descendant_of(addr) From 84e92439465ca7f5730259b928775b1fcf8de1df Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Thu, 18 Jul 2024 11:43:15 -0400 Subject: [PATCH 079/197] removed a trailing whitespace --- bqskit/runtime/task.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 74e6e07e1..c8fd39db2 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -115,4 +115,3 @@ async def run(self) -> Any: def is_descendant_of(self, addr: RuntimeAddress) -> bool: """Return true if `addr` identifies a parent (or this) task.""" return addr == self.return_address or addr in self.breadcrumbs - From fcc0a87f632c2570e42a31ea3edf95ead729f32c Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Sun, 21 Jul 2024 04:06:26 -0400 Subject: [PATCH 080/197] added explicit cancelling from worker instead of calling coro.close() in __del__ --- bqskit/runtime/task.py | 3 +-- bqskit/runtime/worker.py | 15 ++++++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index c8fd39db2..30f39a6c6 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -103,8 +103,7 @@ def start(self) -> None: def cancel(self) -> None: """Ask the coroutine to gracefully exit.""" - if self.coro is not None: - self.coro.close() + self.coro.close() async def run(self) -> Any: """Task coroutine wrapper.""" diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 8113bd419..87cbcfb66 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -326,26 +326,31 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: self._cancelled_task_ids.add(addr) # Remove all tasks that are children of `addr` from initialized tasks + error = None for key, task in self._tasks.items(): if task.is_descendant_of(addr): for mailbox_id in self._tasks[key].owned_mailboxes: self._mailboxes.pop(mailbox_id) - task.cancel() + try: + task.cancel() + except Exception as e: + if error is None: + error = e self._tasks = { a: t for a, t in self._tasks.items() if not t.is_descendant_of(addr) } # Remove all tasks that are children of `addr` from delayed tasks - for task in self._delayed_tasks: - if task.is_descendant_of(addr): - task.cancel() - self._delayed_tasks = [ t for t in self._delayed_tasks if not t.is_descendant_of(addr) ] + # if there was an error earlier, raise it now + if error is not None: + raise error + def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise None.""" while True: From a5cbf510272ae8a25bdbc514399ddd627d248a0e Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Sun, 21 Jul 2024 18:02:38 -0400 Subject: [PATCH 081/197] added explicit handling of the case where coro is None --- bqskit/runtime/task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 30f39a6c6..4ee2a6453 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -103,7 +103,10 @@ def start(self) -> None: def cancel(self) -> None: """Ask the coroutine to gracefully exit.""" - self.coro.close() + if self.coro is not None: + self.coro.close() + else: + raise RuntimeError('Task was cancelled before its coroutine was started.') async def run(self) -> Any: """Task coroutine wrapper.""" From 07f95ebf9d3844bd258812ff329a9027a53f9655 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 22 Jul 2024 16:53:01 -0400 Subject: [PATCH 082/197] Update --- bqskit/ir/gates/composed/powergate.py | 128 +++++++------------ bqskit/ir/gates/parameterized/u8.py | 16 +-- bqskit/passes/mapping/placement/trivial.py | 2 +- bqskit/qis/unitary/unitarymatrix.py | 33 +++++ bqskit/utils/test/strategies.py | 19 +++ tests/ir/gates/composed/test_power.py | 137 ++++++++------------- tests/qis/unitary/test_unitarymatrix.py | 21 ++++ 7 files changed, 177 insertions(+), 179 deletions(-) diff --git a/bqskit/ir/gates/composed/powergate.py b/bqskit/ir/gates/composed/powergate.py index 2ee5866df..30d489099 100644 --- a/bqskit/ir/gates/composed/powergate.py +++ b/bqskit/ir/gates/composed/powergate.py @@ -9,7 +9,6 @@ from bqskit.ir.gate import Gate from bqskit.ir.gates.composed.daggergate import DaggerGate from bqskit.ir.gates.composedgate import ComposedGate -from bqskit.ir.gates.constant.identity import IdentityGate from bqskit.qis.unitary.differentiable import DifferentiableUnitary from bqskit.qis.unitary.unitary import RealVector from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix @@ -27,7 +26,7 @@ class PowerGate( The PowerGate is a composed gate that equivalent to the integer power of the input gate. - For example: + Examples: >>> from bqskit.ir.gates import TGate, TdgGate >>> PowerGate(TGate(),2).get_unitary() == TdgGate().get_unitary()*TdgGate().get_unitary() @@ -39,49 +38,34 @@ def __init__(self, gate: Gate, power: int = 1) -> None: Create a gate which is the integer power of the input gate. Args: - gate (Gate): The Gate to conjugate transpose. - power (integer): The power index for the PowerGate + gate (Gate): The Gate to conjugate transpose. + power (int): The power index for the PowerGate. """ if not isinstance(gate, Gate): raise TypeError('Expected gate object, got %s' % type(gate)) if not is_integer(power): - raise TypeError( - f'Expected integer for num_controls, got {type(power)}.', - ) + raise TypeError(f'Expected integer power, got {type(power)}.') self.gate = gate self.power = power - self._name = 'Power(%s)' % gate.name + self._name = f'[{gate.name}^{power}]' self._num_params = gate.num_params self._num_qudits = gate.num_qudits self._radixes = gate.radixes # If input is a constant gate, we can cache the unitary. 
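
For reference, a hedged usage sketch of the constant-gate behavior introduced here, assuming the usual TGate, SGate and TdgGate exports of bqskit.ir.gates: squaring T gives S, and a power of -1 gives the inverse.

# Sketch of the new PowerGate behavior; gates are from bqskit.ir.gates.
import numpy as np
from bqskit.ir.gates import PowerGate, SGate, TdgGate, TGate

assert np.allclose(
    PowerGate(TGate(), 2).get_unitary(),
    SGate().get_unitary(),
)
assert np.allclose(
    PowerGate(TGate(), -1).get_unitary(),
    TdgGate().get_unitary(),
)
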
if self.num_params == 0 and not building_docs(): - self.utry = UnitaryMatrix( - np.linalg.matrix_power( - self.gate.get_unitary(), self.power, - ), self._radixes, - ) + self.utry = self.gate.get_unitary([]).ipower(power) def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the unitary for this gate, see :class:`Unitary` for more.""" if hasattr(self, 'utry'): return self.utry - return UnitaryMatrix( - np.linalg.matrix_power( - self.gate.get_unitary(params), - self.power, - ), - self._radixes, - ) + return self.gate.get_unitary(params).ipower(self.power) - def get_grad( - self, - params: RealVector = [], - ) -> npt.NDArray[np.complex128]: + def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: """ Return the gradient for this gate. @@ -108,82 +92,64 @@ def get_unitary_and_grad( See :class:`DifferentiableUnitary` for more info. """ + # Constant gate case if hasattr(self, 'utry'): return self.utry, np.array([]) + grad_shape = (self.num_params, self.dim, self.dim) + + # Identity gate case if self.power == 0: - ID = IdentityGate(radixes=self.gate.radixes).get_unitary() - ID_GRAD = 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() - return ID, ID_GRAD + utry = UnitaryMatrix.identity(self.dim) + grad = np.zeros(grad_shape, dtype=np.complex128) + return utry, grad - # powers = {0: IdentityGate(radixes=self.gate.radixes).get_unitary()} - # grads = {0: 0*IdentityGate(radixes=self.gate.radixes).get_unitary()} + # Invert the gate if the power is negative + gate = self.gate if self.power > 0 else DaggerGate(self.gate) + power = abs(self.power) - powers = {} - grads = {} + # Parallel Dicts for unitary and gradient powers + utrys = {} # utrys[i] = gate^(2^i) + grads = {} # grads[i] = d(gate^(2^i))/d(params) # decompose the power as sum of powers of 2 - indexbin = bin(abs(self.power))[2:] - indices = [ - len(indexbin) - 1 - xb.start() - for xb in re.finditer('1', indexbin) + power_bin = bin(abs(power))[2:] + binary_decomp = [ + len(power_bin) - 1 - xb.start() + for xb in re.finditer('1', power_bin) ][::-1] + max_power_of_2 = max(binary_decomp) - powers[0], grads[0] = self.gate.get_unitary_and_grad( # type: ignore + # Base Case: 2^0 + utrys[0], grads[0] = gate.get_unitary_and_grad(params) # type: ignore - params, - ) - # avoid doing computations if not needed - if self.power == 1: - return powers[0], grads[0] - - # check if the power is negative, and - if np.sign(self.power) == -1: - gate = DaggerGate(self.gate) - powers[0], grads[0] = gate.get_unitary_and_grad(params) - - # avoid doing computations if not needed - if abs(self.power) == 1: - return powers[0], grads[0] - - grads[1] = grads[0] @ powers[0] + powers[0] @ grads[0] - powers[1] = powers[0] @ powers[0] - - # avoid doing more computations if not needed - if abs(self.power) == 2: - return powers[1], grads[1] - - # loop over powers of 2 - for i in range(2, indices[-1] + 1): - powers[i] = powers[i - 1] @ powers[i - 1] - grads[i] = grads[i - 1] @ powers[i - 1] + \ - powers[i - 1] @ grads[i - 1] - - unitary = powers[indices[0]] - for i in indices[1:]: - unitary = unitary @ powers[indices[i]] - - grad = 0 * IdentityGate(radixes=self.gate.radixes).get_unitary() - for i in indices: - grad_tmp = grads[i] - for j in indices: - if j < i: - grad_tmp = powers[j] @ grad_tmp - elif j > i: - grad_tmp = grad_tmp @ powers[j] - grad = grad + grad_tmp - - return unitary, grad + # Loop over powers of 2 + for i in range(1, max_power_of_2 + 1): + # u^(2^i) = u^(2^(i-1)) @ u^(2^(i-1)) + utrys[i] = utrys[i - 1] @ utrys[i - 
1] + + # d[u^(2^i)] = d[u^(2^(i-1)) @ u^(2^(i-1))] = + grads[i] = grads[i - 1] @ utrys[i - 1] + utrys[i - 1] @ grads[i - 1] + + # Calculate binary composition of the unitary and gradient + utry = utrys[binary_decomp[0]] + grad = grads[binary_decomp[0]] + for i in sorted(binary_decomp[1:]): + grad = grad @ utrys[i] + utry @ grads[i] + utry = utry @ utrys[i] + + return utry, grad def __eq__(self, other: object) -> bool: return ( isinstance(other, PowerGate) and self.gate == other.gate + and self.power == other.power ) def __hash__(self) -> int: - return hash(self.gate) + return hash((self.power, self.gate)) def get_inverse(self) -> Gate: """Return the gate's inverse as a gate.""" - return DaggerGate(self.gate) + return PowerGate(self.gate, -self.power) diff --git a/bqskit/ir/gates/parameterized/u8.py b/bqskit/ir/gates/parameterized/u8.py index 85808f713..ce4c3337e 100644 --- a/bqskit/ir/gates/parameterized/u8.py +++ b/bqskit/ir/gates/parameterized/u8.py @@ -91,12 +91,12 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [ # wrt params[0] [-s1 * c2 * p1, c1 * p3, -s1 * s2 * p4], [ - -c1 * c2 * c3 * p1 * p2 * m3, -s1 * \ - c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4, + -c1 * c2 * c3 * p1 * p2 * m3, -s1 + * c3 * p2, -c1 * s2 * c3 * p2 * m3 * p4, ], [ - -c1 * c2 * s3 * p1 * m3 * p5, -s1 * \ - s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5, + -c1 * c2 * s3 * p1 * m3 * p5, -s1 + * s3 * p5, -c1 * s2 * s3 * m3 * p4 * p5, ], ], @@ -142,8 +142,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [ # wrt params[4] [0, 0, 0], [ - -1j * s1 * c2 * c3 * p1 * p2 * m3, 1j * c1 * \ - c3 * p2, -1j * s1 * s2 * c3 * p2 * m3 * p4, + -1j * s1 * c2 * c3 * p1 * p2 * m3, 1j * c1 + * c3 * p2, -1j * s1 * s2 * c3 * p2 * m3 * p4, ], [1j * s2 * c3 * m2 * m4, 0, -1j * c2 * c3 * m1 * m2], ], @@ -178,8 +178,8 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: [0, 0, 0], [-1j * s2 * s3 * m4 * m5, 0, 1j * c2 * s3 * m1 * m5], [ - -1j * s1 * c2 * s3 * p1 * m3 * p5, 1j * c1 * \ - s3 * p5, -1j * s1 * s2 * s3 * m3 * p4 * p5, + -1j * s1 * c2 * s3 * p1 * m3 * p5, 1j * c1 + * s3 * p5, -1j * s1 * s2 * s3 * m3 * p4 * p5, ], ], ], diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index df54d94e5..95ee91d3b 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data["placement"]}') + _logger.info(f'Placed qudits on {data['placement']}') # Raise an error if this is not a valid placement sg = model.coupling_graph.get_subgraph(data['placement']) diff --git a/bqskit/qis/unitary/unitarymatrix.py b/bqskit/qis/unitary/unitarymatrix.py index 10d55780d..5448d9215 100644 --- a/bqskit/qis/unitary/unitarymatrix.py +++ b/bqskit/qis/unitary/unitarymatrix.py @@ -199,6 +199,22 @@ def otimes(self, *utrys: UnitaryLike) -> UnitaryMatrix: return UnitaryMatrix(utry_acm, radixes_acm) + def ipower(self, power: int) -> UnitaryMatrix: + """ + Calculate the integer power of this unitary. + + Args: + power (int): The integer power to raise the unitary to. + + Returns: + UnitaryMatrix: The resulting unitary matrix. 
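
A standalone numerical check (plain numpy, with a 2x2 rotation standing in for a gate unitary) of the recursion used above: powers of two are built by repeated squaring, their gradients follow the product rule, and the requested power is assembled from its binary decomposition, here 5 = 4 + 1.

# Standalone check; a rotation matrix stands in for a parameterized unitary.
import numpy as np

def U(t: float) -> np.ndarray:
    return np.array([[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]])

def dU(t: float) -> np.ndarray:
    return np.array([[-np.sin(t), -np.cos(t)], [np.cos(t), -np.sin(t)]])

x = 0.37
u1, d1 = U(x), dU(x)
u2, d2 = u1 @ u1, d1 @ u1 + u1 @ d1        # 2^1 by squaring
u4, d4 = u2 @ u2, d2 @ u2 + u2 @ d2        # 2^2 by squaring
u5, d5 = u4 @ u1, d4 @ u1 + u4 @ d1        # binary composition: 5 = 4 + 1

assert np.allclose(u5, np.linalg.matrix_power(u1, 5))
# For a rotation, d(U^5)/dt = 5 U^4 U' because U and U' commute.
assert np.allclose(d5, 5 * np.linalg.matrix_power(u1, 4) @ dU(x))
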
+ """ + if power < 0: + mat = np.linalg.matrix_power(self.dagger, -power) + else: + mat = np.linalg.matrix_power(self, power) + return UnitaryMatrix(mat, self.radixes) + def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the same object, satisfies the :class:`Unitary` API.""" return self @@ -232,6 +248,23 @@ def get_distance_from(self, other: UnitaryLike, degree: int = 2) -> float: dist = np.power(1 - (frac ** degree), 1.0 / degree) return dist if dist > 0.0 else 0.0 + def isclose(self, other: UnitaryLike, tol: float = 1e-6) -> bool: + """ + Check if `self` is approximately equal to `other` upto global phase. + + Args: + other (UnitaryLike): The unitary to compare to. + + tol (float): The numerical precision of the check. + + Returns: + bool: True if `self` is close to `other`. + + See Also: + - :func:`get_distance_from` for the error function used. + """ + return self.get_distance_from(other) < tol + def get_statevector(self, in_state: StateLike) -> StateVector: """ Calculate the output state after applying this unitary to `in_state`. diff --git a/bqskit/utils/test/strategies.py b/bqskit/utils/test/strategies.py index de0e5e2dc..420ffc52a 100644 --- a/bqskit/utils/test/strategies.py +++ b/bqskit/utils/test/strategies.py @@ -40,6 +40,7 @@ from bqskit.ir.region import CircuitRegion from bqskit.qis.state.state import StateLike from bqskit.qis.state.state import StateVector +from bqskit.qis.unitary import RealVector from bqskit.qis.unitary import UnitaryMatrix from bqskit.qis.unitary.unitarymatrix import UnitaryLike from bqskit.utils.typing import is_integer @@ -258,6 +259,24 @@ def gates( return gate +@composite +def gates_and_params( + draw: Any, + radixes: Sequence[int] | int | None = None, + constant: bool | None = None, +) -> tuple[Gate, RealVector]: + """Hypothesis strategy for generating gates and parameters.""" + gate = draw(gates(radixes, constant)) + params = draw( + lists( + floats(allow_nan=False, allow_infinity=False, width=16), + min_size=gate.num_params, + max_size=gate.num_params, + ), + ) + return gate, params + + @composite def operations( draw: Any, diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index 6e3e9b1fe..00d5fe27d 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -3,94 +3,53 @@ from __future__ import annotations import numpy as np +import numpy.typing as npt +from hypothesis import given +from hypothesis.strategies import integers -from bqskit.ir.gates import DaggerGate +from bqskit.ir.gate import Gate from bqskit.ir.gates import PowerGate -from bqskit.ir.gates import RXGate -from bqskit.ir.gates import RYGate -from bqskit.ir.gates import RZGate - - -test_power = lambda gate, power, params: np.linalg.matrix_power( - gate.get_unitary([params]), power, -) - - -def square_grad(gate, params): - g, gd = gate.get_unitary_and_grad([params]) - return g @ gd + gd @ g - - -def third_power_grad(gate, params): - g, gd = gate.get_unitary_and_grad([params]) - return g @ square_grad(gate, params) + gd @ test_power(gate, 2, params) - - -def quartic_power_grad(gate, params): - g, gd = gate.get_unitary_and_grad([params]) - return g @ third_power_grad(gate, params) + gd @ test_power(gate, 3, params) - - -def power_gate_grads(gate, power, params): - if power == 2: - return square_grad(gate, params) - elif power == -2: - return square_grad(DaggerGate(gate), params) - elif power == 3: - return third_power_grad(gate, params) - elif power == -3: - return 
third_power_grad(DaggerGate(gate), params) - elif power == 4: - return quartic_power_grad(gate, power) - elif power == -4: - return quartic_power_grad(DaggerGate(gate), power) - - -def test(test_gate, indices, params, error) -> None: - - # test index 1 - for param in params: - pgt, pgdt = test_gate.get_unitary_and_grad([param]) - pgate = PowerGate(test_gate, 1) - pg, pgd = pgate.get_unitary_and_grad([param]) - assert np.sum(abs(pg - pgt)) < error - assert np.sum(abs(pgd - pgdt)) < error - - # test index -1 - for param in params: - pgt, pgdt = DaggerGate(test_gate).get_unitary_and_grad([param]) - pgate = PowerGate(test_gate, -1) - pg, pgd = pgate.get_unitary_and_grad([param]) - assert np.sum(abs(pg - pgt)) < error - assert np.sum(abs(pgd - pgdt)) < error - - # test other indices - for index in indices: - for param in params: - gate = test_power(test_gate, index, param) - grad = power_gate_grads(test_gate, index, param) - - pgate = PowerGate(test_gate, index) - pg, pgd = pgate.get_unitary_and_grad([param]) - assert np.sum(abs(pg - gate)) < error - assert np.sum(abs(pgd - grad)) < error - - -error = 1e-14 -params = [-0.7, -0.3, 0.2, 1.4] -indices = [-4, -3, -2, 2, 3, 4] - - -def test_x() -> None: - global error, indices, parames - test(RXGate(), indices, params, error) - - -def test_y() -> None: - global error, indices, parames - test(RYGate(), indices, params, error) - - -def test_z() -> None: - global error, indices, parames - test(RZGate(), indices, params, error) +from bqskit.qis.unitary.differentiable import DifferentiableUnitary +from bqskit.qis.unitary.unitary import RealVector +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.utils.test.strategies import gates_and_params + + +def _recursively_calc_power_grad( + g: UnitaryMatrix, + dg: npt.NDArray[np.complex128], + power: int, +) -> npt.NDArray[np.complex128]: + """D(g^n+1) = d(g@g^n) = g @ d(g^n) + dg @ g^n.""" + if len(dg) == 0: + return np.zeros_like(dg) + if power < 0: + return _recursively_calc_power_grad( + g.dagger, + dg.conj().transpose([0, 2, 1]), + -power, + ) + if power == 1: + return dg + dgn = _recursively_calc_power_grad(g, dg, power - 1) + return g @ dgn + dg @ g.ipower(power - 1) + + +@given(gates_and_params(), integers(min_value=-10, max_value=10)) +def test_power_gate(g_and_p: tuple[Gate, RealVector], power: int) -> None: + gate, params = g_and_p + pgate = PowerGate(gate, power) + actual_unitary = pgate.get_unitary(params) + expected_unitary = gate.get_unitary(params).ipower(power) + assert actual_unitary.isclose(expected_unitary) + + if not isinstance(gate, DifferentiableUnitary): + return + + actual_grad = pgate.get_grad(params) + expected_grad = _recursively_calc_power_grad( + gate.get_unitary(params), + gate.get_grad(params), + power, + ) + assert np.allclose(actual_grad, expected_grad) diff --git a/tests/qis/unitary/test_unitarymatrix.py b/tests/qis/unitary/test_unitarymatrix.py index 046057c45..045ddc433 100644 --- a/tests/qis/unitary/test_unitarymatrix.py +++ b/tests/qis/unitary/test_unitarymatrix.py @@ -216,3 +216,24 @@ def test_scalar_multiplication(self, u: UnitaryMatrix, a: float) -> None: out2 = a * u assert out2 is not u assert not isinstance(out2, UnitaryMatrix) + + +@given(unitaries(), integers(min_value=-10, max_value=10)) +def test_ipower(u: UnitaryMatrix, n: int) -> None: + out = u.ipower(n) + if n == 0: + assert out == UnitaryMatrix.identity(u.dim, u.radixes) + elif n == 1: + assert out == u + elif n == -1: + assert out == u.dagger + elif n < 0: + acm = u.dagger + for _ in 
range(-n - 1): + acm = acm @ u.dagger + assert out == acm + else: + acm = u + for _ in range(n - 1): + acm = acm @ u + assert out == acm From 13d0452430f45f655aa501c37bdf97964d91c781 Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Mon, 22 Jul 2024 17:32:54 -0400 Subject: [PATCH 083/197] made the RuntimeError shorter --- bqskit/runtime/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 4ee2a6453..b200b54d9 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -106,7 +106,7 @@ def cancel(self) -> None: if self.coro is not None: self.coro.close() else: - raise RuntimeError('Task was cancelled before its coroutine was started.') + raise RuntimeError('Task was cancelled with None coroutine.') async def run(self) -> Any: """Task coroutine wrapper.""" From 05f6790b311611bc376a2ec888f0aeafc1232b38 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 29 Jul 2024 08:32:14 -0400 Subject: [PATCH 084/197] Fix --- bqskit/passes/mapping/placement/trivial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index 95ee91d3b..df54d94e5 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data['placement']}') + _logger.info(f'Placed qudits on {data["placement"]}') # Raise an error if this is not a valid placement sg = model.coupling_graph.get_subgraph(data['placement']) From 111d0ae8e34f0d5a5d7ebeff8fbc57eef99d75f9 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 09:27:22 -0400 Subject: [PATCH 085/197] Fixed test --- tests/ir/gates/composed/test_power.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index 00d5fe27d..64d156e7b 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -46,6 +46,9 @@ def test_power_gate(g_and_p: tuple[Gate, RealVector], power: int) -> None: if not isinstance(gate, DifferentiableUnitary): return + if gate.num_params == 0: + return + actual_grad = pgate.get_grad(params) expected_grad = _recursively_calc_power_grad( gate.get_unitary(params), From 0641ad1c49ffde1372a36b27123be8a0de48e35b Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Wed, 31 Jul 2024 13:12:26 -0700 Subject: [PATCH 086/197] Fixing Docstrings --- bqskit/passes/processing/treescan.py | 46 ++++++++++++++++++---------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index 230596196..71ff203c4 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -1,4 +1,4 @@ -"""This module implements the ScanningGateRemovalPass.""" +"""This module implements the TreeScanningGateRemovalPass.""" from __future__ import annotations import logging @@ -20,21 +20,22 @@ class TreeScanningGateRemovalPass(BasePass): """ - The ScanningGateRemovalPass class. + The TreeScanningGateRemovalPass class. Starting from one side of the circuit, run the following: - Split the circuit operations into chunks of size tree_depth + Split the circuit operations into chunks of size `tree_depth` At every iteration: a. 
Look at the next chunk of operations - b. Generate 2 ^ tree_depth circuits. Each circuit corresponds to every + b. Generate 2 ^ `tree_depth` circuits. Each circuit corresponds to every combination of whether or not to include one of the operations in the chunk. - c. Instantiate in parallel all 2^tree_depth circuits + c. Instantiate in parallel all 2^`tree_depth` circuits d. Choose the circuit that has the least number of operations and move on to the next chunk of operations. - This optimization is less greedy than the current ScanningGate removal, - which we see can offermuch better quality circuits than ScanningGate. + This optimization is less greedy than the current + :class:`~bqskit.passes.processing.ScanningGateRemovalPass` removal, + which leads to much better quality circuits than ScanningGate. In very rare occasions, ScanningGate may be able to outperform TreeScan (since it is still greedy), but in general we can expect TreeScan to almost always outperform ScanningGate. @@ -50,7 +51,7 @@ def __init__( collection_filter: Callable[[Operation], bool] | None = None, ) -> None: """ - Construct a ScanningGateRemovalPass. + Construct a TreeScanningGateRemovalPass. Args: start_from_left (bool): Determines where the scan starts @@ -133,11 +134,24 @@ def get_tree_circs( circuit_copy: Circuit, cycle_and_ops: list[tuple[int, Operation]], ) -> list[Circuit]: - ''' - Given a circuit, create 2^(tree_depth) - 1 circuits that remove up - to tree_depth operations. The circuits are sorted by the number of - operations removed. - ''' + """ + Generate all circuits to be instantiated in the tree scan. + + Args: + orig_num_cycles (int): The original number of cycles + in the circuit. This allows us to keep track of the shift + caused by previous deletions. + + circuit_copy (Circuit): Current state of the circuit. + + cycle_and_ops: list[(int, Operation)]: The next chunk + of operations to be considered for deletion. + + Returns: + A list of 2^(`tree_depth`) - 1 circuits that remove up + to `tree_depth` operations. The circuits are sorted by + the number of operations removed. 
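
A hedged sketch (standalone Python with placeholder operation names, not BQSKit objects) of the enumeration described above: every subset of a `tree_depth`-sized chunk is a deletion candidate, and candidates are ordered so the most aggressive deletion is tried first.

# Placeholder names only; this is not the pass implementation.
from itertools import combinations

chunk = ['op_a', 'op_b', 'op_c']           # one chunk with tree_depth = 3
candidates = [
    kept
    for r in range(len(chunk) + 1)
    for kept in combinations(chunk, r)
]
assert len(candidates) == 2 ** len(chunk)  # 2^tree_depth subsets in total

# get_tree_circs returns the 2^tree_depth - 1 modified circuits, sorted so
# the variant deleting the most operations is instantiated and checked first.
candidates.sort(key=len)
print(candidates[0], candidates[-1])       # () versus the unmodified chunk
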
+ """ all_circs = [circuit_copy.copy()] for cycle, op in cycle_and_ops: new_circs = [] @@ -162,7 +176,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: instantiate_options['seed'] = data.seed start = 'left' if self.start_from_left else 'right' - _logger.debug(f'Starting scanning gate removal on the {start}.') + _logger.debug(f'Starting tree scanning gate removal on the {start}.') target = self.get_target(circuit, data) # target = None @@ -172,8 +186,8 @@ async def run(self, circuit: Circuit, data: PassData) -> None: ops_left = list(circuit.operations_with_cycles(reverse=reverse_iter)) print( - f'Starting Scan with tree depth {self.tree_depth}' - ' on circuit with {len(ops_left)} gates', + f'Starting TreeScan with tree depth {self.tree_depth}' + f' on circuit with {len(ops_left)} gates', ) while ops_left: From af73a8eb7f52e88f9341f5360224058f00a6e049 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 16:33:11 -0400 Subject: [PATCH 087/197] Implement `CouplingGraph.is_linear` --- bqskit/qis/graph.py | 21 +++++++++++++++++++++ tests/qis/test_graph.py | 10 ++++++++++ 2 files changed, 31 insertions(+) diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index 06994ef34..7a7ae7c30 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -92,6 +92,27 @@ def is_fully_connected(self) -> bool: return False + def is_linear(self) -> bool: + """Return true if the graph is linearly connected.""" + if self.num_qudits < 2: + return False + + num_deg_1 = 0 + for node_neighbors in self._adj: + if len(node_neighbors) == 1: + num_deg_1 += 1 + + elif len(node_neighbors) == 0: + return False + + elif len(node_neighbors) > 2: + return False + + if num_deg_1 != 2: + return False + + return True + def get_neighbors_of(self, qudit: int) -> list[int]: """Return the qudits adjacent to `qudit`.""" return list(self._adj[qudit]) diff --git a/tests/qis/test_graph.py b/tests/qis/test_graph.py index d4ea681df..577ab6a1c 100644 --- a/tests/qis/test_graph.py +++ b/tests/qis/test_graph.py @@ -63,3 +63,13 @@ def test_invalid(self) -> None: with pytest.raises(TypeError): coupling_graph.get_subgraph('a') # type: ignore + +def test_is_linear() -> None: + coupling_graph = CouplingGraph({(0, 1), (1, 2), (2, 3)}) + assert coupling_graph.is_linear() + + coupling_graph = CouplingGraph({(0, 1), (1, 2), (0, 3), (2, 3)}) + assert not coupling_graph.is_linear() + + coupling_graph = CouplingGraph.all_to_all(4) + assert not coupling_graph.is_linear() From 9a50b4782187b0eab79d365dceb414f9651062db Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 16:33:53 -0400 Subject: [PATCH 088/197] Update documentation on surround --- bqskit/ir/circuit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 7d6d06b2a..2bcf7077f 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2180,8 +2180,8 @@ def surround( function determines if a candidate region is valid in the caller's context. This is used to prune the search space of the surround function. If None, then no filtering is - done. It takes a CircuitRegion and returns a - boolean. + done. It takes a CircuitRegion and returns a boolean. + Only regions that pass the filter are considered. scoring_fn (Callable[[CircuitRegion], float] | None): The scoring function determines the "best" surrounding region. 
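
A quick standalone check of the new predicate, mirroring the cases in test_is_linear plus a ring graph: a path of couplings is linear, while a ring (every vertex of degree 2) and an all-to-all graph are not.

# Mirrors the new test_is_linear cases, with a ring added for contrast.
from bqskit.qis.graph import CouplingGraph

assert CouplingGraph({(0, 1), (1, 2), (2, 3)}).is_linear()
assert not CouplingGraph({(0, 1), (1, 2), (2, 3), (3, 0)}).is_linear()
assert not CouplingGraph.all_to_all(4).is_linear()
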
From b47367a7b89510fd6dc60759526d0c9bc6a9c9fd Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 16:34:24 -0400 Subject: [PATCH 089/197] Updated default surround scoring fn --- bqskit/ir/circuit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 2bcf7077f..2c32fcde4 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2225,7 +2225,7 @@ def surround( raise TypeError(f'Expected callable filter, got {type(filter)}.') def default_scoring_fn(region: CircuitRegion) -> float: - return float(sum(op.num_qudits for op in self[region])) + return float(sum(op.num_qudits * 100 for op in self[region])) if scoring_fn is None: scoring_fn = default_scoring_fn From 991350ad4a743016883b5b04f6381f1e5a15da91 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 16:35:09 -0400 Subject: [PATCH 090/197] New tests for surround filter --- tests/ir/circuit/test_region_methods.py | 32 ++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/ir/circuit/test_region_methods.py b/tests/ir/circuit/test_region_methods.py index 5dfd50c19..669564dda 100644 --- a/tests/ir/circuit/test_region_methods.py +++ b/tests/ir/circuit/test_region_methods.py @@ -387,11 +387,41 @@ def test_surround_filter_hard(self) -> None: ) assert region.location == CircuitLocation([2, 3, 4, 5]) + def test_surround_filter_topology(self) -> None: + circuit = Circuit(5) + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [0, 2]) + circuit.append_gate(CNOTGate(), [0, 1]) + circuit.append_gate(CNOTGate(), [0, 2]) + circuit.append_gate(CNOTGate(), [1, 2]) + circuit.append_gate(CNOTGate(), [2, 3]) + circuit.append_gate(CNOTGate(), [3, 4]) -def test_check_region() -> None: + def region_filter(region: CircuitRegion) -> bool: + print(circuit.get_slice(region.points).coupling_graph, circuit.get_slice(region.points).coupling_graph.is_linear(), region) + return circuit.get_slice(region.points).coupling_graph.is_linear() + + region = circuit.surround( + (4, 1), 4, None, None, lambda region: ( + region_filter(region) + ), + ) + print(region) + assert circuit.is_valid_region(region) + assert region.location == CircuitLocation([1, 2, 3, 4]) + + +def test_check_region_1() -> None: c = Circuit(4) c.append_gate(CNOTGate(), [1, 2]) c.append_gate(CNOTGate(), [0, 1]) c.append_gate(CNOTGate(), [2, 3]) c.append_gate(CNOTGate(), [1, 2]) assert not c.is_valid_region({1: (0, 2), 2: (0, 2), 3: (0, 2)}) + +def test_check_region_2() -> None: + c = Circuit(3) + c.append_gate(CNOTGate(), [0, 1]) + c.append_gate(CNOTGate(), [0, 2]) + c.append_gate(CNOTGate(), [1, 2]) + assert not c.is_valid_region({0: (0, 0), 1: (0, 2), 2: (2, 2)}) From 0242460aec37c08056796847892ec8a3f8ddd224 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 31 Jul 2024 16:35:26 -0400 Subject: [PATCH 091/197] Fixed bug --- bqskit/ir/circuit.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 2c32fcde4..7e19c6e25 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2289,9 +2289,11 @@ def default_scoring_fn(region: CircuitRegion) -> float: region_bldr = {k: v for k, v in node.items()} op = self[point] valid_region = True + need_to_fully_check = False for qudit in op.location: if qudit not in region_bldr: region_bldr[qudit] = CycleInterval(point[0], point[0]) + need_to_fully_check = True elif point[0] < region_bldr[qudit][0]: # Check for gates in the middle not in region @@ -2347,6 
+2349,10 @@ def default_scoring_fn(region: CircuitRegion) -> float: if not valid_region: continue + if need_to_fully_check: + if not self.is_valid_region(region_bldr): + continue + new_region = CircuitRegion(region_bldr) # Check uniqueness From d9fe8208017249c9047e109b6b3cfdcc608fd5d0 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 13:48:40 -0700 Subject: [PATCH 092/197] Updated documentation --- bqskit/qis/pauliz.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/bqskit/qis/pauliz.py b/bqskit/qis/pauliz.py index 32d0a01f6..819755671 100644 --- a/bqskit/qis/pauliz.py +++ b/bqskit/qis/pauliz.py @@ -1,4 +1,4 @@ -"""This module implements the PauliMatrices class.""" +"""This module implements the PauliZMatrices class.""" from __future__ import annotations import itertools as it @@ -23,8 +23,25 @@ class PauliZMatrices(Sequence[npt.NDArray[np.complex128]]): """ The group of Pauli Z matrices. - A PauliZMatrices object represents the entire of set of Pauli Z matrices for - some number of qubits. + A PauliZMatrices object represents the entire of set of diagonal Hermitian + matrices for some number of qubits. These matrices are a linear combination + of all n-fold tensor products of Pauli Z and the identity matrix. + + Examples: + .. math:: + I + Z = \\begin{pmatrix} + 2 & 0 \\\\ + 0 & 0 \\\\ + \\end{pmatrix} + + .. math:: + I \\otimes I + Z \\otimes I + 3 I \\otimes Z - Z \\otimes Z = + \\begin{pmatrix} + 4 & 0 & 0 & 0 \\\\ + 0 & -2 & 0 & 0 \\\\ + 0 & 0 & 2 & 0 \\\\ + 0 & 0 & 0 & -2 \\\\ + \\end{pmatrix} """ Z = np.array( From 0f7a8727c5ff01779e608d2a48abccbbba9503e8 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 13:58:24 -0700 Subject: [PATCH 093/197] Updated documentation --- bqskit/qis/pauliz.py | 18 +++++++++--------- bqskit/utils/math.py | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/bqskit/qis/pauliz.py b/bqskit/qis/pauliz.py index 819755671..33541b5dc 100644 --- a/bqskit/qis/pauliz.py +++ b/bqskit/qis/pauliz.py @@ -167,7 +167,7 @@ def get_projection_matrices( # Nth Order Pauli Z Matrices can be thought of base 2 number # I = 0, Z = 1 - # IZZ = 1 * 2^2 + 1 * 2^1 + 0 * 4^0 = 6 (base 10) + # IZZ = 1 * 2^2 + 1 * 2^1 + 0 * 2^0 = 6 (base 10) # This gives the idx of IZZ in paulizs # Note we read qubit index from the left, # so Z in ZII corresponds to q = 0 @@ -185,14 +185,14 @@ def dot_product(self, alpha: RealVector) -> npt.NDArray[np.complex128]: Computes the standard dot product of `alpha` with the paulis. Args: - alpha (RealVector): The pauli coefficients. + alpha (RealVector): The Pauli Z coefficients. Returns: np.ndarray: Sum of element-wise multiplication of `alpha` - and `self.paulis`. + and `self.paulizs`. Raises: - ValueError: If `alpha` and `self.paulis` are incompatible. + ValueError: If `alpha` and `self.paulizs` are incompatible. """ if not is_sequence(alpha) or not all(is_numeric(a) for a in alpha): @@ -216,14 +216,14 @@ def from_string( Construct Pauli Z matrices from a string description. Args: - pauli_string (str): A string that describes the desired matrices. - This is a comma-seperated list of pauli strings. - A pauli string has the following regex pattern: [IZ]+ + pauliz_string (str): A string that describes the desired matrices. + This is a comma-seperated list of Pauli Z strings. 
+ A Pauli Z string has the following regex pattern: [IZ]+ Returns: - np.ndarray | list[np.ndarray]: Either the single pauli Z matrix + np.ndarray | list[np.ndarray]: Either the single Pauli Z matrix if only one is constructed, or the list of the constructed - pauli Z matrices. + Pauli Z matrices. Raises: ValueError: if `pauliz_string` is invalid. diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index 839be0441..eca1dc490 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -174,16 +174,16 @@ def pauliz_expansion(H: npt.NDArray[np.complex128]) -> npt.NDArray[np.float64]: Computes a Pauli Z expansion of the diagonal hermitian matrix H. Args: - H (np.ndarray): The diagonal hermitian matrix to expand. + H (np.ndarray): The (N, N) diagonal hermitian matrix to expand. Returns: np.ndarray: The coefficients of a Pauli Z expansion for H, - i.e., X dot Sigma = H where Sigma contains Pauli Z matrices of + i.e., x dot Sigma = H where Sigma contains Pauli Z matrices of same size of H. Note: - This assumes the input is diagonal. No check is done for hermicity. - The output is undefined on non-hermitian inputs. + This assumes the input is diagonal but of shape (N, N). No check is + done for hermicity. The output is undefined on non-hermitian inputs. """ diag_H = np.diag(np.diag(H)) if not np.allclose(H, diag_H): From 9bcda45b26619dd859d6af8843c79e1452d7e24d Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 14:26:21 -0700 Subject: [PATCH 094/197] Faster get_unitary --- bqskit/ir/gates/parameterized/pauliz.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bqskit/ir/gates/parameterized/pauliz.py b/bqskit/ir/gates/parameterized/pauliz.py index 12d51e2f2..a2771e9fd 100644 --- a/bqskit/ir/gates/parameterized/pauliz.py +++ b/bqskit/ir/gates/parameterized/pauliz.py @@ -51,18 +51,18 @@ def __init__(self, num_qudits: int) -> None: self._name = f'PauliZGate({num_qudits})' self._num_qudits = num_qudits - self.paulis = PauliZMatrices(self.num_qudits) - self._num_params = len(self.paulis) + paulizs = PauliZMatrices(self.num_qudits) + self._num_params = len(paulizs) if building_docs(): self.sigmav: npt.NDArray[Any] = np.array([]) else: - self.sigmav = (-1j / 2) * self.paulis.numpy + self.sigmav = (-1j / 2) * paulizs.numpy def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the unitary for this gate, see :class:`Unitary` for more.""" self.check_parameters(params) H = dot_product(params, self.sigmav) - eiH = sp.linalg.expm(H) + eiH = np.diag(np.exp(np.diag(H))) return UnitaryMatrix(eiH, check_arguments=False) def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: @@ -70,9 +70,10 @@ def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]: Return the gradient for this gate. See :class:`DifferentiableUnitary` for more info. + + TODO: Accelerated gradient computation for diagonal matrices. 
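
A small standalone check (numpy and scipy only) of two facts used above: a diagonal Hermitian matrix expands in the {I, Z} tensor basis, which is what pauliz_expansion computes, and for a diagonal generator the matrix exponential in the faster get_unitary reduces to exponentiating the diagonal entries elementwise.

# Standalone numpy/scipy check, not BQSKit code.
import numpy as np
from scipy.linalg import expm

I2 = np.eye(2)
Z = np.diag([1.0, -1.0])
basis = [np.kron(I2, I2), np.kron(I2, Z), np.kron(Z, I2), np.kron(Z, Z)]

H = np.diag([0.4, -1.2, 0.9, 2.5])                    # diagonal Hermitian
coeffs = [np.trace(P @ H).real / 4.0 for P in basis]  # project onto the basis
assert np.allclose(sum(c * P for c, P in zip(coeffs, basis)), H)

A = -0.5j * H                                         # a diagonal generator
assert np.allclose(expm(A), np.diag(np.exp(np.diag(A))))
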
""" self.check_parameters(params) - H = dot_product(params, self.sigmav) _, dU = dexpmv(H, self.sigmav) return dU From 0bc4a4e39d664a22bb31bdeb64d85db39c2247dd Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 14:26:40 -0700 Subject: [PATCH 095/197] Updated documentation --- bqskit/passes/synthesis/diagonal.py | 4 ++-- bqskit/utils/math.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index 73a03a6c2..19a775406 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -76,12 +76,12 @@ async def synthesize( """Synthesize `utry`, see :class:`SynthesisPass` for more.""" if not isinstance(utry, UnitaryMatrix): m = 'DiagonalSynthesisPass can only synthesize diagonal, ' - m += f'UnitaryMatrixs, got {type(utry)}.' + m += f'`UnitaryMatrix`s, got {type(utry)}.' raise TypeError(m) if not utry.is_qubit_only(): m = 'DiagonalSynthesisPass can only synthesize diagonal ' - m += 'UnitaryMatrixs with qubits, got higher radix than 2.' + m += '`UnitaryMatrix`s with qubits, got higher radix than 2.' raise ValueError(m) num_qubits = utry.num_qudits diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index eca1dc490..53cd5cddb 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -20,7 +20,7 @@ def dexpmv( User must provide M and its derivative dM. If the argument dM is a vector of partials then dF will be the respective partial vector. - This is done using a Pade Approximat with scaling and squaring. + This is done using a Pade Approximation with scaling and squaring. Args: M (np.ndarray): Matrix to exponentiate. From 01d5bcdac9d61448f4426179f52fd50c9a331664 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 14:36:09 -0700 Subject: [PATCH 096/197] Added reference, now a BasePass --- bqskit/passes/synthesis/diagonal.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index 19a775406..c2fc5c8c5 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -9,7 +9,7 @@ from bqskit.ir.circuit import Circuit from bqskit.ir.gates import CNOTGate from bqskit.ir.gates import RZGate -from bqskit.passes.synthesis.synthesis import SynthesisPass +from bqskit.compiler.basepass import BasePass from bqskit.qis.state.state import StateVector from bqskit.qis.state.system import StateSystem from bqskit.qis.unitary import UnitaryMatrix @@ -20,8 +20,12 @@ _logger = logging.getLogger(__name__) -class DiagonalSynthesisPass(SynthesisPass): - """A pass that synthesizes diagonal unitaries.""" +class DiagonalSynthesisPass(BasePass): + """ + A pass that synthesizes diagonal unitaries. + + Based on: https://arxiv.org/abs/1306.3991 + """ def __init__( self, From 40b190b0e854e95fdf84f15c4af2ce6331e12297 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 14:37:37 -0700 Subject: [PATCH 097/197] Added TODO note for qudit support --- bqskit/passes/synthesis/diagonal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index c2fc5c8c5..4d74bcef0 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -39,8 +39,8 @@ def __init__( less than this are rounded to zero. 
(Default: 1e-8) TODO: - - Optimize Pauli string ordering - Cancel adjacent CNOTs + - See how QFAST can be used to generalize to qudits """ self.parameter_precision = parameter_precision From 7363ef7c43d791cf0748186cedfdb549458fdfde Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 15:34:09 -0700 Subject: [PATCH 098/197] Changed to SynthesisPass --- bqskit/passes/synthesis/diagonal.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index 4d74bcef0..e199f4878 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -9,7 +9,7 @@ from bqskit.ir.circuit import Circuit from bqskit.ir.gates import CNOTGate from bqskit.ir.gates import RZGate -from bqskit.compiler.basepass import BasePass +from bqskit.passes.synthesis import SynthesisPass from bqskit.qis.state.state import StateVector from bqskit.qis.state.system import StateSystem from bqskit.qis.unitary import UnitaryMatrix @@ -20,10 +20,10 @@ _logger = logging.getLogger(__name__) -class DiagonalSynthesisPass(BasePass): +class DiagonalSynthesisPass(SynthesisPass): """ A pass that synthesizes diagonal unitaries. - + Based on: https://arxiv.org/abs/1306.3991 """ From db69e48e32cb6f83287d10524bf4d199c940fd5b Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 15:34:21 -0700 Subject: [PATCH 099/197] pre-commit --- bqskit/ir/gates/parameterized/pauliz.py | 1 - bqskit/qis/pauliz.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bqskit/ir/gates/parameterized/pauliz.py b/bqskit/ir/gates/parameterized/pauliz.py index a2771e9fd..4eb42d6d7 100644 --- a/bqskit/ir/gates/parameterized/pauliz.py +++ b/bqskit/ir/gates/parameterized/pauliz.py @@ -5,7 +5,6 @@ import numpy as np import numpy.typing as npt -import scipy as sp from bqskit.ir.gates.generalgate import GeneralGate from bqskit.ir.gates.qubitgate import QubitGate diff --git a/bqskit/qis/pauliz.py b/bqskit/qis/pauliz.py index 33541b5dc..101324596 100644 --- a/bqskit/qis/pauliz.py +++ b/bqskit/qis/pauliz.py @@ -35,7 +35,7 @@ class PauliZMatrices(Sequence[npt.NDArray[np.complex128]]): \\end{pmatrix} .. 
math:: - I \\otimes I + Z \\otimes I + 3 I \\otimes Z - Z \\otimes Z = + I \\otimes I + Z \\otimes I + 3 I \\otimes Z - Z \\otimes Z = \\begin{pmatrix} 4 & 0 & 0 & 0 \\\\ 0 & -2 & 0 & 0 \\\\ From 0c2d0bd1192c75c6dd8e92f936a231730367121d Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 31 Jul 2024 15:38:39 -0700 Subject: [PATCH 100/197] Fixed SynthesisPass import --- bqskit/passes/synthesis/diagonal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index e199f4878..194d84eb4 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -9,7 +9,7 @@ from bqskit.ir.circuit import Circuit from bqskit.ir.gates import CNOTGate from bqskit.ir.gates import RZGate -from bqskit.passes.synthesis import SynthesisPass +from bqskit.passes.synthesis.synthesis import SynthesisPass from bqskit.qis.state.state import StateVector from bqskit.qis.state.system import StateSystem from bqskit.qis.unitary import UnitaryMatrix From ac92a55d340fd8df8e6483b781f78851ef1c5674 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Wed, 31 Jul 2024 16:53:37 -0700 Subject: [PATCH 101/197] Fixing Return docstring --- bqskit/passes/processing/treescan.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index 71ff203c4..4f8889582 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -148,9 +148,9 @@ def get_tree_circs( of operations to be considered for deletion. Returns: - A list of 2^(`tree_depth`) - 1 circuits that remove up - to `tree_depth` operations. The circuits are sorted by - the number of operations removed. + list[Circuit]: A list of 2^(`tree_depth`) - 1 circuits + that remove up to `tree_depth` operations. The circuits + are sorted by the number of operations removed. 
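One way to see that count: the candidate circuits correspond to the non-empty subsets of the chunk of `tree_depth` operations, and a set of size d has exactly 2^d - 1 non-empty subsets. A minimal counting sketch (illustrative only, not the actual `get_tree_circs` implementation; `chunk` is a stand-in for the list of candidate operations):

    from itertools import combinations

    def count_deletion_candidates(chunk: list) -> int:
        # Each non-empty subset of the chunk marks a distinct set of
        # operations to delete, giving one candidate circuit per subset.
        subsets = [
            s
            for r in range(1, len(chunk) + 1)
            for s in combinations(chunk, r)
        ]
        assert len(subsets) == 2 ** len(chunk) - 1
        return len(subsets)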
""" all_circs = [circuit_copy.copy()] for cycle, op in cycle_and_ops: From 1771a7f435ff0ed35ef91ee55cb64128cbd86a31 Mon Sep 17 00:00:00 2001 From: Justin Kalloor Date: Thu, 1 Aug 2024 10:36:20 -0700 Subject: [PATCH 102/197] Removing unecessary lines --- bqskit/passes/processing/treescan.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/bqskit/passes/processing/treescan.py b/bqskit/passes/processing/treescan.py index 4f8889582..12376e59d 100644 --- a/bqskit/passes/processing/treescan.py +++ b/bqskit/passes/processing/treescan.py @@ -179,7 +179,6 @@ async def run(self, circuit: Circuit, data: PassData) -> None: _logger.debug(f'Starting tree scanning gate removal on the {start}.') target = self.get_target(circuit, data) - # target = None circuit_copy = circuit.copy() reverse_iter = not self.start_from_left @@ -194,8 +193,6 @@ async def run(self, circuit: Circuit, data: PassData) -> None: chunk = ops_left[:self.tree_depth] ops_left = ops_left[self.tree_depth:] - # Circuits of size 2 ** tree_depth - 1, - # ranked in order of most to fewest deletions all_circs = TreeScanningGateRemovalPass.get_tree_circs( circuit.num_cycles, circuit_copy, chunk, ) From 5062635be10313e5a388a13bc78ef50309f2b528 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 1 Aug 2024 13:47:03 -0400 Subject: [PATCH 103/197] Fixed test --- tests/ir/gates/composed/test_power.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index 64d156e7b..4f19f57ae 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -21,7 +21,7 @@ def _recursively_calc_power_grad( power: int, ) -> npt.NDArray[np.complex128]: """D(g^n+1) = d(g@g^n) = g @ d(g^n) + dg @ g^n.""" - if len(dg) == 0: + if len(dg) == 0 or power == 0: return np.zeros_like(dg) if power < 0: return _recursively_calc_power_grad( @@ -34,7 +34,7 @@ def _recursively_calc_power_grad( dgn = _recursively_calc_power_grad(g, dg, power - 1) return g @ dgn + dg @ g.ipower(power - 1) - +from bqskit.ir.gates import CRYGate @given(gates_and_params(), integers(min_value=-10, max_value=10)) def test_power_gate(g_and_p: tuple[Gate, RealVector], power: int) -> None: gate, params = g_and_p From 4672a5aa5450933d57ab5a2d6af33b94cbcd0b14 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 1 Aug 2024 13:54:20 -0400 Subject: [PATCH 104/197] Cleaned up leftover from previous fix --- bqskit/passes/mapping/placement/trivial.py | 2 +- tests/ir/gates/composed/test_power.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index df54d94e5..95ee91d3b 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data["placement"]}') + _logger.info(f'Placed qudits on {data['placement']}') # Raise an error if this is not a valid placement sg = model.coupling_graph.get_subgraph(data['placement']) diff --git a/tests/ir/gates/composed/test_power.py b/tests/ir/gates/composed/test_power.py index 4f19f57ae..82f5c3634 100644 --- a/tests/ir/gates/composed/test_power.py +++ b/tests/ir/gates/composed/test_power.py @@ -34,7 +34,7 @@ def _recursively_calc_power_grad( dgn = _recursively_calc_power_grad(g, dg, power - 1) return g @ dgn + dg @ 
g.ipower(power - 1) -from bqskit.ir.gates import CRYGate + @given(gates_and_params(), integers(min_value=-10, max_value=10)) def test_power_gate(g_and_p: tuple[Gate, RealVector], power: int) -> None: gate, params = g_and_p From 09c02c8b72f0a787226a04ee5c5004ab8a51de0d Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 1 Aug 2024 11:55:24 -0700 Subject: [PATCH 105/197] Renamed DiagonalSynthesisPass to WalshDiagonalSynthesisPass --- bqskit/passes/synthesis/__init__.py | 4 ++-- bqskit/passes/synthesis/diagonal.py | 12 ++++++------ tests/passes/synthesis/test_diagonal.py | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bqskit/passes/synthesis/__init__.py b/bqskit/passes/synthesis/__init__.py index 6b7abe330..ca3d4896a 100644 --- a/bqskit/passes/synthesis/__init__.py +++ b/bqskit/passes/synthesis/__init__.py @@ -1,7 +1,7 @@ """This package implements synthesis passes and synthesis related classes.""" from __future__ import annotations -from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -11,7 +11,7 @@ from bqskit.passes.synthesis.target import SetTargetPass __all__ = [ - 'DiagonalSynthesisPass', + 'WalshDiagonalSynthesisPass', 'LEAPSynthesisPass', 'QFASTDecompositionPass', 'QPredictDecompositionPass', diff --git a/bqskit/passes/synthesis/diagonal.py b/bqskit/passes/synthesis/diagonal.py index 194d84eb4..969703a5f 100644 --- a/bqskit/passes/synthesis/diagonal.py +++ b/bqskit/passes/synthesis/diagonal.py @@ -1,4 +1,4 @@ -"""This module implements the DiagonalSynthesisPass.""" +"""This module implements the WalshDiagonalSynthesisPass.""" from __future__ import annotations import logging @@ -20,9 +20,9 @@ _logger = logging.getLogger(__name__) -class DiagonalSynthesisPass(SynthesisPass): +class WalshDiagonalSynthesisPass(SynthesisPass): """ - A pass that synthesizes diagonal unitaries. + A pass that synthesizes diagonal unitaries into Walsh functions. Based on: https://arxiv.org/abs/1306.3991 """ @@ -32,7 +32,7 @@ def __init__( parameter_precision: float = 1e-8, ) -> None: """ - Constructor for DiagonalSynthesisPass. + Constructor for WalshDiagonalSynthesisPass. Args: parameter_precision (float): Pauli strings with parameter values @@ -79,12 +79,12 @@ async def synthesize( ) -> Circuit: """Synthesize `utry`, see :class:`SynthesisPass` for more.""" if not isinstance(utry, UnitaryMatrix): - m = 'DiagonalSynthesisPass can only synthesize diagonal, ' + m = 'WalshDiagonalSynthesisPass can only synthesize diagonal, ' m += f'`UnitaryMatrix`s, got {type(utry)}.' raise TypeError(m) if not utry.is_qubit_only(): - m = 'DiagonalSynthesisPass can only synthesize diagonal ' + m = 'WalshDiagonalSynthesisPass can only synthesize diagonal ' m += '`UnitaryMatrix`s with qubits, got higher radix than 2.' 
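For reference, a minimal usage sketch of the renamed pass, following the same pattern as the updated tests in this patch; the random diagonal target and the three-qubit size are illustrative assumptions:

    import numpy as np

    from bqskit.compiler import Compiler
    from bqskit.ir.circuit import Circuit
    from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass
    from bqskit.qis import UnitaryMatrix

    # Build a random 3-qubit diagonal unitary (phases on the diagonal).
    phases = np.random.uniform(-np.pi, np.pi, 8)
    utry = UnitaryMatrix(np.diag(np.exp(1j * phases)))

    circuit = Circuit.from_unitary(utry)
    with Compiler() as compiler:
        circuit = compiler.compile(circuit, [WalshDiagonalSynthesisPass()])
    print(circuit.get_unitary().get_distance_from(utry))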
raise ValueError(m) diff --git a/tests/passes/synthesis/test_diagonal.py b/tests/passes/synthesis/test_diagonal.py index 5e2c557a4..e72b6f500 100644 --- a/tests/passes/synthesis/test_diagonal.py +++ b/tests/passes/synthesis/test_diagonal.py @@ -5,12 +5,12 @@ from bqskit.compiler import Compiler from bqskit.ir.circuit import Circuit -from bqskit.passes.synthesis.diagonal import DiagonalSynthesisPass +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.qis import UnitaryMatrix from bqskit.qis.pauliz import PauliZMatrices -class TestDiagonalSynthesis: +class TestWalshDiagonalSynthesis: def test_1_qubit(self, compiler: Compiler) -> None: num_qubits = 1 @@ -20,7 +20,7 @@ def test_1_qubit(self, compiler: Compiler) -> None: utry = UnitaryMatrix(expm(1j * H_matrix)) circuit = Circuit.from_unitary(utry) - synthesis = DiagonalSynthesisPass() + synthesis = WalshDiagonalSynthesisPass() circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) @@ -34,7 +34,7 @@ def test_2_qubit(self, compiler: Compiler) -> None: utry = UnitaryMatrix(expm(1j * H_matrix)) circuit = Circuit.from_unitary(utry) - synthesis = DiagonalSynthesisPass() + synthesis = WalshDiagonalSynthesisPass() circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) @@ -48,7 +48,7 @@ def test_3_qubit(self, compiler: Compiler) -> None: utry = UnitaryMatrix(expm(1j * H_matrix)) circuit = Circuit.from_unitary(utry) - synthesis = DiagonalSynthesisPass() + synthesis = WalshDiagonalSynthesisPass() circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) @@ -62,7 +62,7 @@ def test_4_qubit(self, compiler: Compiler) -> None: utry = UnitaryMatrix(expm(1j * H_matrix)) circuit = Circuit.from_unitary(utry) - synthesis = DiagonalSynthesisPass() + synthesis = WalshDiagonalSynthesisPass() circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) @@ -76,7 +76,7 @@ def test_5_qubit(self, compiler: Compiler) -> None: utry = UnitaryMatrix(expm(1j * H_matrix)) circuit = Circuit.from_unitary(utry) - synthesis = DiagonalSynthesisPass() + synthesis = WalshDiagonalSynthesisPass() circuit = compiler.compile(circuit, [synthesis]) dist = circuit.get_unitary().get_distance_from(utry) From a7421018dd21521302d144d9f7b19ed784f8628d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 5 Aug 2024 10:00:32 -0400 Subject: [PATCH 106/197] Pre-commit cleanup --- bqskit/ir/circuit.py | 2 +- tests/ir/circuit/test_region_methods.py | 3 +-- tests/qis/test_graph.py | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/ir/circuit.py b/bqskit/ir/circuit.py index 7e19c6e25..38eb40d1d 100644 --- a/bqskit/ir/circuit.py +++ b/bqskit/ir/circuit.py @@ -2293,7 +2293,7 @@ def default_scoring_fn(region: CircuitRegion) -> float: for qudit in op.location: if qudit not in region_bldr: region_bldr[qudit] = CycleInterval(point[0], point[0]) - need_to_fully_check = True + need_to_fully_check = True elif point[0] < region_bldr[qudit][0]: # Check for gates in the middle not in region diff --git a/tests/ir/circuit/test_region_methods.py b/tests/ir/circuit/test_region_methods.py index 669564dda..f6629fd49 100644 --- a/tests/ir/circuit/test_region_methods.py +++ b/tests/ir/circuit/test_region_methods.py @@ -398,7 +398,6 @@ def test_surround_filter_topology(self) -> None: circuit.append_gate(CNOTGate(), [3, 4]) def region_filter(region: CircuitRegion) -> bool: - 
print(circuit.get_slice(region.points).coupling_graph, circuit.get_slice(region.points).coupling_graph.is_linear(), region) return circuit.get_slice(region.points).coupling_graph.is_linear() region = circuit.surround( @@ -406,7 +405,6 @@ def region_filter(region: CircuitRegion) -> bool: region_filter(region) ), ) - print(region) assert circuit.is_valid_region(region) assert region.location == CircuitLocation([1, 2, 3, 4]) @@ -419,6 +417,7 @@ def test_check_region_1() -> None: c.append_gate(CNOTGate(), [1, 2]) assert not c.is_valid_region({1: (0, 2), 2: (0, 2), 3: (0, 2)}) + def test_check_region_2() -> None: c = Circuit(3) c.append_gate(CNOTGate(), [0, 1]) diff --git a/tests/qis/test_graph.py b/tests/qis/test_graph.py index 577ab6a1c..5a205e8ee 100644 --- a/tests/qis/test_graph.py +++ b/tests/qis/test_graph.py @@ -64,6 +64,7 @@ def test_invalid(self) -> None: with pytest.raises(TypeError): coupling_graph.get_subgraph('a') # type: ignore + def test_is_linear() -> None: coupling_graph = CouplingGraph({(0, 1), (1, 2), (2, 3)}) assert coupling_graph.is_linear() From e26b639c4875d41cf3bc4902e29d1a6968a40c99 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 5 Aug 2024 12:02:35 -0400 Subject: [PATCH 107/197] Update pre-commit... finally... --- .pre-commit-config.yaml | 16 ++++++++-------- bqskit/ext/qiskit/models.py | 2 +- bqskit/ir/interval.py | 2 +- bqskit/ir/point.py | 2 +- bqskit/qis/state/state.py | 6 +++++- bqskit/qis/unitary/unitary.py | 7 ++++++- bqskit/qis/unitary/unitarymatrix.py | 7 ++++++- 7 files changed, 28 insertions(+), 14 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bcdf16e4f..5fd25af39 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ ci: skip: [mypy] repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer @@ -30,7 +30,7 @@ repos: - --wrap-summaries=80 - --wrap-descriptions=80 - repo: https://github.com/pre-commit/mirrors-autopep8 - rev: v2.0.2 + rev: v2.0.4 hooks: - id: autopep8 args: @@ -39,13 +39,13 @@ repos: - --ignore=E731 exclude: 'tests/ext.*' - repo: https://github.com/asottile/pyupgrade - rev: v3.10.1 + rev: v3.17.0 hooks: - id: pyupgrade args: - --py38-plus - repo: https://github.com/asottile/reorder_python_imports - rev: v3.10.0 + rev: v3.13.0 hooks: - id: reorder-python-imports args: @@ -54,25 +54,25 @@ repos: - --py37-plus exclude: 'tests/ext.*' - repo: https://github.com/asottile/add-trailing-comma - rev: v3.0.1 + rev: v3.1.0 hooks: - id: add-trailing-comma args: - --py36-plus - repo: https://github.com/PyCQA/autoflake - rev: v2.2.0 + rev: v2.3.1 hooks: - id: autoflake args: - --in-place - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.5.0 + rev: v1.11.1 hooks: - id: mypy exclude: tests/qis/test_pauli.py additional_dependencies: ["numpy>=1.21"] - repo: https://github.com/PyCQA/flake8 - rev: 6.1.0 + rev: 7.1.1 hooks: - id: flake8 args: diff --git a/bqskit/ext/qiskit/models.py b/bqskit/ext/qiskit/models.py index 237771390..9b42d57f3 100644 --- a/bqskit/ext/qiskit/models.py +++ b/bqskit/ext/qiskit/models.py @@ -23,7 +23,7 @@ def model_from_backend(backend: BackendV1) -> MachineModel: num_qudits = config.n_qubits gate_set = _basis_gate_str_to_bqskit_gate(config.basis_gates) coupling_map = list({tuple(sorted(e)) for e in config.coupling_map}) - return MachineModel(num_qudits, coupling_map, gate_set) # type: ignore + return MachineModel(num_qudits, coupling_map, gate_set) def 
_basis_gate_str_to_bqskit_gate(basis_gates: list[str]) -> set[Gate]: diff --git a/bqskit/ir/interval.py b/bqskit/ir/interval.py index 5375955a6..97f8a6552 100644 --- a/bqskit/ir/interval.py +++ b/bqskit/ir/interval.py @@ -89,7 +89,7 @@ def __new__( 'Expected positive integers, got {lower} and {upper}.', ) - return super().__new__(cls, (lower, upper)) # type: ignore + return super().__new__(cls, (lower, upper)) @property def lower(self) -> int: diff --git a/bqskit/ir/point.py b/bqskit/ir/point.py index a44d1b1f5..0e510c449 100644 --- a/bqskit/ir/point.py +++ b/bqskit/ir/point.py @@ -66,7 +66,7 @@ def __new__( else: raise TypeError('Expected two integer arguments.') - return super().__new__(cls, (cycle, qudit)) # type: ignore + return super().__new__(cls, (cycle, qudit)) @property def cycle(self) -> int: diff --git a/bqskit/qis/state/state.py b/bqskit/qis/state/state.py index bd8f93e88..d29d61521 100644 --- a/bqskit/qis/state/state.py +++ b/bqskit/qis/state/state.py @@ -433,4 +433,8 @@ def __repr__(self) -> str: return repr(self._vec) -StateLike = Union[StateVector, np.ndarray, Sequence[Union[int, float, complex]]] +StateLike = Union[ + StateVector, + npt.NDArray[np.complex128], + Sequence[Union[int, float, complex]], +] diff --git a/bqskit/qis/unitary/unitary.py b/bqskit/qis/unitary/unitary.py index cb4ada60f..fb797ecce 100644 --- a/bqskit/qis/unitary/unitary.py +++ b/bqskit/qis/unitary/unitary.py @@ -7,6 +7,7 @@ from typing import Union import numpy as np +import numpy.typing as npt from bqskit.qis.unitary.meta import UnitaryMeta from bqskit.utils.typing import is_real_number @@ -151,4 +152,8 @@ def is_self_inverse(self, params: RealVector = []) -> bool: return np.allclose(unitary_matrix, hermitian_conjugate) -RealVector = Union[Sequence[float], np.ndarray] +RealVector = Union[ + Sequence[float], + npt.NDArray[np.float64], + npt.NDArray[np.float32], +] diff --git a/bqskit/qis/unitary/unitarymatrix.py b/bqskit/qis/unitary/unitarymatrix.py index 5448d9215..04fafc616 100644 --- a/bqskit/qis/unitary/unitarymatrix.py +++ b/bqskit/qis/unitary/unitarymatrix.py @@ -540,6 +540,11 @@ def __hash__(self) -> int: UnitaryLike = Union[ UnitaryMatrix, - np.ndarray, + npt.NDArray[np.complex128], + npt.NDArray[np.complex64], + npt.NDArray[np.int64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float32], Sequence[Sequence[Union[int, float, complex]]], ] From 1c20cb219a6dec5ddc129d626b85921bfbcac94f Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 5 Aug 2024 12:03:48 -0400 Subject: [PATCH 108/197] Fixed the refix of the unfix... 
fixed --- bqskit/passes/mapping/placement/trivial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/mapping/placement/trivial.py b/bqskit/passes/mapping/placement/trivial.py index 95ee91d3b..df54d94e5 100644 --- a/bqskit/passes/mapping/placement/trivial.py +++ b/bqskit/passes/mapping/placement/trivial.py @@ -20,7 +20,7 @@ async def run(self, circuit: Circuit, data: PassData) -> None: model = BasePass.get_model(circuit, data) data['placement'] = trivial_placement - _logger.info(f'Placed qudits on {data['placement']}') + _logger.info(f'Placed qudits on {data["placement"]}') # Raise an error if this is not a valid placement sg = model.coupling_graph.get_subgraph(data['placement']) From 966ec6d9a6757796c494071b14870ffcbfa76894 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 5 Aug 2024 12:33:08 -0400 Subject: [PATCH 109/197] Fixed overflow in Unitary.dim --- bqskit/qis/unitary/unitary.py | 10 +++++++++- tests/qis/unitary/test_props.py | 6 ++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 tests/qis/unitary/test_props.py diff --git a/bqskit/qis/unitary/unitary.py b/bqskit/qis/unitary/unitary.py index fb797ecce..8aa6c4d23 100644 --- a/bqskit/qis/unitary/unitary.py +++ b/bqskit/qis/unitary/unitary.py @@ -54,7 +54,15 @@ def dim(self) -> int: if hasattr(self, '_dim'): return self._dim - return int(np.prod(self.radixes)) + # return int(np.prod(self.radixes)) + # Above line removed due to failure to handle overflow and + # underflows for large dimensions. + + acm = 1 + for radix in self.radixes: + acm *= radix + return acm + @abc.abstractmethod def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: diff --git a/tests/qis/unitary/test_props.py b/tests/qis/unitary/test_props.py new file mode 100644 index 000000000..d86ca7efa --- /dev/null +++ b/tests/qis/unitary/test_props.py @@ -0,0 +1,6 @@ +from bqskit.ir.circuit import Circuit + +def test_circuit_dim_overflow() -> None: + c = Circuit(1024) + assert c.dim != 0 + From 70cca157a26550f9e7d2a3817ee92bf2d84d6c04 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 5 Aug 2024 13:06:45 -0400 Subject: [PATCH 110/197] Fixed CI --- bqskit/qis/unitary/unitary.py | 3 +-- tests/qis/unitary/test_props.py | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/bqskit/qis/unitary/unitary.py b/bqskit/qis/unitary/unitary.py index 8aa6c4d23..b9f9966d5 100644 --- a/bqskit/qis/unitary/unitary.py +++ b/bqskit/qis/unitary/unitary.py @@ -60,10 +60,9 @@ def dim(self) -> int: acm = 1 for radix in self.radixes: - acm *= radix + acm *= int(radix) return acm - @abc.abstractmethod def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """ diff --git a/tests/qis/unitary/test_props.py b/tests/qis/unitary/test_props.py index d86ca7efa..2df433a69 100644 --- a/tests/qis/unitary/test_props.py +++ b/tests/qis/unitary/test_props.py @@ -1,6 +1,8 @@ +from __future__ import annotations + from bqskit.ir.circuit import Circuit + def test_circuit_dim_overflow() -> None: c = Circuit(1024) assert c.dim != 0 - From 003b96303c8d3bceecbca327eb91c3d85edf9f42 Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Mon, 5 Aug 2024 19:09:51 -0400 Subject: [PATCH 111/197] errors with canceling a task will throw immediately --- bqskit/runtime/worker.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 87cbcfb66..26dd2e53f 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -326,16 +326,11 @@ def _handle_cancel(self, 
addr: RuntimeAddress) -> None: self._cancelled_task_ids.add(addr) # Remove all tasks that are children of `addr` from initialized tasks - error = None for key, task in self._tasks.items(): if task.is_descendant_of(addr): for mailbox_id in self._tasks[key].owned_mailboxes: self._mailboxes.pop(mailbox_id) - try: - task.cancel() - except Exception as e: - if error is None: - error = e + task.cancel() self._tasks = { a: t for a, t in self._tasks.items() if not t.is_descendant_of(addr) @@ -347,9 +342,6 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] - # if there was an error earlier, raise it now - if error is not None: - raise error def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise None.""" From 2093e5f1d0c4f34b0338a34a14888798b50adbde Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Mon, 5 Aug 2024 19:26:59 -0400 Subject: [PATCH 112/197] attempting to satsisfy the pep8 checker --- bqskit/runtime/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 26dd2e53f..ede0fd548 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -328,9 +328,9 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: # Remove all tasks that are children of `addr` from initialized tasks for key, task in self._tasks.items(): if task.is_descendant_of(addr): + task.cancel() for mailbox_id in self._tasks[key].owned_mailboxes: self._mailboxes.pop(mailbox_id) - task.cancel() self._tasks = { a: t for a, t in self._tasks.items() if not t.is_descendant_of(addr) @@ -342,7 +342,6 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] - def _get_next_ready_task(self) -> RuntimeTask | None: """Return the next ready task if one exists, otherwise None.""" while True: @@ -500,7 +499,8 @@ def submit( fnarg, RuntimeAddress(self._id, mailbox_id, 0), self._active_task.comp_task_id, - self._active_task.breadcrumbs + (self._active_task.return_address,), + self._active_task.breadcrumbs + + (self._active_task.return_address,), self._active_task.logging_level, self._active_task.max_logging_depth, ) From 988c97b80bf940421c74bb7b5fdb8d7243840b58 Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Mon, 5 Aug 2024 19:32:07 -0400 Subject: [PATCH 113/197] trying to find a compromise between pep8 and flake8 --- bqskit/runtime/worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index ede0fd548..d6f86f183 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -498,9 +498,9 @@ def submit( task = RuntimeTask( fnarg, RuntimeAddress(self._id, mailbox_id, 0), - self._active_task.comp_task_id, - self._active_task.breadcrumbs + - (self._active_task.return_address,), + + self._active_task.comp_task_id, + self._active_task.breadcrumbs + + (self._active_task.return_address,), self._active_task.logging_level, self._active_task.max_logging_depth, ) From 1954267442b69ee7210cc1da7a4739b2a671e154 Mon Sep 17 00:00:00 2001 From: Marc Davis Date: Mon, 5 Aug 2024 19:38:42 -0400 Subject: [PATCH 114/197] added a comment to help future debugging understand a potential error case --- bqskit/runtime/task.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index b200b54d9..bd413374a 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -104,6 +104,10 @@ def start(self) -> 
None: def cancel(self) -> None: """Ask the coroutine to gracefully exit.""" if self.coro is not None: + # If this call to "close" raises a RuntimeError, + # it is likely a blanket try/accept catching the + # error used to stop the coroutine, preventing + # it from stopping correctly. self.coro.close() else: raise RuntimeError('Task was cancelled with None coroutine.') From f6ccd81509f4c0cd0e21e03498c0a1bf5a437a04 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 6 Aug 2024 07:31:25 -0400 Subject: [PATCH 115/197] Removed plus --- bqskit/runtime/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index d6f86f183..00961d8de 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -498,7 +498,7 @@ def submit( task = RuntimeTask( fnarg, RuntimeAddress(self._id, mailbox_id, 0), - + self._active_task.comp_task_id, + self._active_task.comp_task_id, self._active_task.breadcrumbs + (self._active_task.return_address,), self._active_task.logging_level, From 649f418bd65446b52308bea3370ffe4c5b876355 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 13 Aug 2024 22:54:56 -0700 Subject: [PATCH 116/197] Added workflow_registry and register_workflow --- bqskit/compiler/register.py | 80 +++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 bqskit/compiler/register.py diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py new file mode 100644 index 000000000..038d8bc7a --- /dev/null +++ b/bqskit/compiler/register.py @@ -0,0 +1,80 @@ +"""This module defines a global `worflow_registery` to modify workflows.""" +from __future__ import annotations + +from typing import Optional + +import logging + +from bqskit.compiler.machine import MachineModel +from bqskit.compiler.basepass import BasePass +from bqskit.compiler.workflow import WorkflowLike +from bqskit.compiler.workflow import Workflow + + +_logger = logging.getLogger(__name__) + + +workflow_registry: dict[MachineModel, dict[int, WorkflowLike]] = {} +""" +The workflow_registry enables MachineModel specific workflows to be registered +for used in the `bqskit.compile` method. + +The workflow_registry maps MachineModels a dictionary of Workflows which +are indexed by optimization level. This object should not be accessed directly +by the user, but instead through the `register_workflow` function. + +Examples: + model_t = SpecificMachineModel(num_qudits, radixes) + workflow = [QuickPartitioner(3), NewFangledOptimization()] + register_workflow(model_t, workflow, level) + ... + new_circuit = compile(circuit, model_t, optimization_level=level) +""" + + +def register_workflow( + machine: MachineModel, + workflow: WorkflowLike, + optimization_level: Optional[int] = 1, +) -> None: + """ + Register a workflow for a given machine model. + + Args: + machine (MachineModel): The machine to register the workflow for. + + workflow (list[BasePass]): The workflow or list of passes that whill + be executed if the MachineModel in a call to `compile` matches + `machine`. If `machine` is already registered, a warning will be + logged. + + optimization_level (Optional[int]): The optimization level with + which to register the workflow. If no level is provided, the + Workflow will be registered as level 1. (Default: 1) + + Raises: + TypeError: If `machine` is not a MachineModel. + + TypeError: If `workflow` is not a list of BasePass objects. 
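    Example:
        A concrete sketch of the intended usage, in the spirit of the
        module-level example above; the pass choices, block size, and
        toy circuit below are illustrative assumptions rather than
        requirements:

            from bqskit.compiler import compile
            from bqskit.compiler.machine import MachineModel
            from bqskit.compiler.register import register_workflow
            from bqskit.ir import Circuit
            from bqskit.ir.gates import CNOTGate
            from bqskit.passes import QuickPartitioner
            from bqskit.passes import ScanningGateRemovalPass

            model = MachineModel(3)
            register_workflow(
                model, [QuickPartitioner(2), ScanningGateRemovalPass()],
            )

            circuit = Circuit(3)
            circuit.append_gate(CNOTGate(), (0, 1))

            # compile() now dispatches to the registered workflow for
            # any model with a matching gate set at optimization level 1.
            compiled = compile(circuit, model)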
+ """ + if not isinstance(machine, MachineModel): + m = f'`machine` must be a MachineModel, got {type(machine)}.' + raise TypeError(m) + + if isinstance(workflow, BasePass): + workflow = Workflow(workflow) + + for p in workflow: + if not isinstance(p, BasePass): + m = 'All elements of `workflow` must be BasePass objects. Got ' + m += f'{type(p)}.' + raise TypeError(m) + + global workflow_registry + + if machine in workflow_registry: + if optimization_level in workflow_registry[machine]: + m = f'Overwritting workflow for {machine} at level ' + m += f'{optimization_level}.' + _logger.warn(m) + workflow_registry[machine].update({optimization_level: workflow}) \ No newline at end of file From b4e53664a0626deb3e8783a9122dc11882cc59a3 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 13 Aug 2024 22:55:21 -0700 Subject: [PATCH 117/197] compile checks workflow_registry --- bqskit/compiler/compile.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 4824c42ad..c791d2d61 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -15,6 +15,7 @@ from bqskit.compiler.compiler import Compiler from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData +from bqskit.compiler.register import workflow_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit @@ -668,6 +669,14 @@ def build_workflow( """Build a BQSKit Off-the-Shelf workflow, see :func:`compile` for info.""" if model is None: model = MachineModel(input.num_qudits, radixes=input.radixes) + + # Use a registered workflow if model is found in the registry for a given + # optimization_level + for machine_model in workflow_registry: + gate_set_match = machine_model.gate_set == model.gate_set + opt_lvl_found = optimization_level in workflow_registry[machine_model] + if gate_set_match and opt_lvl_found: + return workflow_registry[machine_model][optimization_level] if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: From 5912fdf6b64f88767c6d273314d42ea7e638b8c7 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 13 Aug 2024 23:11:50 -0700 Subject: [PATCH 118/197] Pre-commit --- bqskit/compiler/compile.py | 2 +- bqskit/compiler/register.py | 48 ++++++++++++++++++------------------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index c791d2d61..f8d568116 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -669,7 +669,7 @@ def build_workflow( """Build a BQSKit Off-the-Shelf workflow, see :func:`compile` for info.""" if model is None: model = MachineModel(input.num_qudits, radixes=input.radixes) - + # Use a registered workflow if model is found in the registry for a given # optimization_level for machine_model in workflow_registry: diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py index 038d8bc7a..0a78fd51a 100644 --- a/bqskit/compiler/register.py +++ b/bqskit/compiler/register.py @@ -1,20 +1,3 @@ -"""This module defines a global `worflow_registery` to modify workflows.""" -from __future__ import annotations - -from typing import Optional - -import logging - -from bqskit.compiler.machine import MachineModel -from bqskit.compiler.basepass import BasePass -from bqskit.compiler.workflow import WorkflowLike -from bqskit.compiler.workflow import Workflow - - -_logger = logging.getLogger(__name__) - - 
-workflow_registry: dict[MachineModel, dict[int, WorkflowLike]] = {} """ The workflow_registry enables MachineModel specific workflows to be registered for used in the `bqskit.compile` method. @@ -30,12 +13,26 @@ ... new_circuit = compile(circuit, model_t, optimization_level=level) """ +from __future__ import annotations + +import logging + +from bqskit.compiler.basepass import BasePass +from bqskit.compiler.machine import MachineModel +from bqskit.compiler.workflow import Workflow +from bqskit.compiler.workflow import WorkflowLike + + +_logger = logging.getLogger(__name__) + + +workflow_registry: dict[MachineModel, dict[int, Workflow]] = {} def register_workflow( machine: MachineModel, workflow: WorkflowLike, - optimization_level: Optional[int] = 1, + optimization_level: int = 1, ) -> None: """ Register a workflow for a given machine model. @@ -47,11 +44,11 @@ def register_workflow( be executed if the MachineModel in a call to `compile` matches `machine`. If `machine` is already registered, a warning will be logged. - - optimization_level (Optional[int]): The optimization level with + + optimization_level (Optional[int]): The optimization level with which to register the workflow. If no level is provided, the Workflow will be registered as level 1. (Default: 1) - + Raises: TypeError: If `machine` is not a MachineModel. @@ -60,9 +57,8 @@ def register_workflow( if not isinstance(machine, MachineModel): m = f'`machine` must be a MachineModel, got {type(machine)}.' raise TypeError(m) - - if isinstance(workflow, BasePass): - workflow = Workflow(workflow) + + workflow = Workflow(workflow) for p in workflow: if not isinstance(p, BasePass): @@ -77,4 +73,6 @@ def register_workflow( m = f'Overwritting workflow for {machine} at level ' m += f'{optimization_level}.' 
_logger.warn(m) - workflow_registry[machine].update({optimization_level: workflow}) \ No newline at end of file + workflow_registry[machine].update({optimization_level: workflow}) + else: + workflow_registry[machine] = {optimization_level: workflow} From 18929e17e90226b26baf27613e14cd01c72121ad Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:03:12 -0700 Subject: [PATCH 119/197] Workflows can be registered through GateSets --- bqskit/compiler/compile.py | 15 ++++++++++----- bqskit/compiler/register.py | 37 +++++++++++++++++++++---------------- 2 files changed, 31 insertions(+), 21 deletions(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index f8d568116..f5b6ca16c 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -13,6 +13,7 @@ import numpy as np from bqskit.compiler.compiler import Compiler +from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData from bqskit.compiler.register import workflow_registry @@ -672,11 +673,15 @@ def build_workflow( # Use a registered workflow if model is found in the registry for a given # optimization_level - for machine_model in workflow_registry: - gate_set_match = machine_model.gate_set == model.gate_set - opt_lvl_found = optimization_level in workflow_registry[machine_model] - if gate_set_match and opt_lvl_found: - return workflow_registry[machine_model][optimization_level] + for machine_or_gateset in workflow_registry: + if isinstance(machine_or_gateset, GateSet): + gate_set = machine_or_gateset + else: + gate_set = machine_or_gateset.gate_set + gs_match = gate_set == model.gate_set + ol_found = optimization_level in workflow_registry[machine_or_gateset] + if gs_match and ol_found: + return workflow_registry[machine_or_gateset][optimization_level] if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py index 0a78fd51a..acc91d6bb 100644 --- a/bqskit/compiler/register.py +++ b/bqskit/compiler/register.py @@ -1,12 +1,12 @@ """ -The workflow_registry enables MachineModel specific workflows to be registered -for used in the `bqskit.compile` method. +The workflow_registry enables MachineModel or GateSet specific workflows to be +registered for used in the `bqskit.compile` method. The workflow_registry maps MachineModels a dictionary of Workflows which are indexed by optimization level. This object should not be accessed directly by the user, but instead through the `register_workflow` function. -Examples: +Example: model_t = SpecificMachineModel(num_qudits, radixes) workflow = [QuickPartitioner(3), NewFangledOptimization()] register_workflow(model_t, workflow, level) @@ -18,6 +18,7 @@ import logging from bqskit.compiler.basepass import BasePass +from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine import MachineModel from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike @@ -26,11 +27,11 @@ _logger = logging.getLogger(__name__) -workflow_registry: dict[MachineModel, dict[int, Workflow]] = {} +workflow_registry: dict[MachineModel | GateSet, dict[int, Workflow]] = {} def register_workflow( - machine: MachineModel, + machine_or_gateset: MachineModel | GateSet, workflow: WorkflowLike, optimization_level: int = 1, ) -> None: @@ -38,7 +39,9 @@ def register_workflow( Register a workflow for a given machine model. 
Args: - machine (MachineModel): The machine to register the workflow for. + machine_or_gateset (MachineModel | GateSet): A MachineModel or GateSet + to register the workflow for. If a circuit is compiled targeting + this machine or gate set, the registered workflow will be used. workflow (list[BasePass]): The workflow or list of passes that whill be executed if the MachineModel in a call to `compile` matches @@ -50,12 +53,14 @@ def register_workflow( Workflow will be registered as level 1. (Default: 1) Raises: - TypeError: If `machine` is not a MachineModel. + TypeError: If `machine_or_gateset` is not a MachineModel or GateSet. TypeError: If `workflow` is not a list of BasePass objects. """ - if not isinstance(machine, MachineModel): - m = f'`machine` must be a MachineModel, got {type(machine)}.' + if not isinstance(machine_or_gateset, MachineModel) and not \ + isinstance(machine_or_gateset, GateSet): + m = '`machine_or_gateset` must be a MachineModel or ' + m += f'GateSet, got {type(machine_or_gateset)}.' raise TypeError(m) workflow = Workflow(workflow) @@ -67,12 +72,12 @@ def register_workflow( raise TypeError(m) global workflow_registry - - if machine in workflow_registry: - if optimization_level in workflow_registry[machine]: - m = f'Overwritting workflow for {machine} at level ' - m += f'{optimization_level}.' + new_workflow = workflow_registry[machine_or_gateset] + if machine_or_gateset in workflow_registry: + if optimization_level in workflow_registry[machine_or_gateset]: + m = f'Overwritting workflow for {machine_or_gateset} ' + m += f'at level {optimization_level}.' _logger.warn(m) - workflow_registry[machine].update({optimization_level: workflow}) + workflow_registry[machine_or_gateset].update(new_workflow) else: - workflow_registry[machine] = {optimization_level: workflow} + workflow_registry[machine_or_gateset] = new_workflow From bb3febb78fb0a9f680ce68bb59696985c216f0fb Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:31:15 -0700 Subject: [PATCH 120/197] GateSets are hashable --- bqskit/compiler/gateset.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bqskit/compiler/gateset.py b/bqskit/compiler/gateset.py index d8111ec4a..63672e768 100644 --- a/bqskit/compiler/gateset.py +++ b/bqskit/compiler/gateset.py @@ -230,6 +230,10 @@ def __str__(self) -> str: def __repr__(self) -> str: """Detailed representation of the GateSet.""" return self._gates.__repr__().replace('frozenset', 'GateSet') + + def __hash__(self) -> int: + """Hash of the GateSet.""" + return self.__repr__().__hash__() GateSetLike = Union[GateSet, Iterable[Gate], Gate] From 3694184b5625c910bfc0c5ac8aa58eeca2d00d5d Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:31:54 -0700 Subject: [PATCH 121/197] Check for Gate sequences --- bqskit/compiler/register.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py index acc91d6bb..37b6b4aec 100644 --- a/bqskit/compiler/register.py +++ b/bqskit/compiler/register.py @@ -23,6 +23,7 @@ from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike +from bqskit.ir.gate import Gate _logger = logging.getLogger(__name__) @@ -57,11 +58,13 @@ def register_workflow( TypeError: If `workflow` is not a list of BasePass objects. 
""" - if not isinstance(machine_or_gateset, MachineModel) and not \ - isinstance(machine_or_gateset, GateSet): - m = '`machine_or_gateset` must be a MachineModel or ' - m += f'GateSet, got {type(machine_or_gateset)}.' - raise TypeError(m) + if not isinstance(machine_or_gateset, MachineModel): + if all(isinstance(g, Gate) for g in machine_or_gateset): + machine_or_gateset = GateSet(machine_or_gateset) + else: + m = '`machine_or_gateset` must be a MachineModel or ' + m += f'GateSet, got {type(machine_or_gateset)}.' + raise TypeError(m) workflow = Workflow(workflow) @@ -72,7 +75,7 @@ def register_workflow( raise TypeError(m) global workflow_registry - new_workflow = workflow_registry[machine_or_gateset] + new_workflow = {optimization_level: workflow} if machine_or_gateset in workflow_registry: if optimization_level in workflow_registry[machine_or_gateset]: m = f'Overwritting workflow for {machine_or_gateset} ' From 888e0b21b6b4633845dca78b34cc44b8064492b8 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:32:09 -0700 Subject: [PATCH 122/197] Tests for register_workflow --- tests/compiler/test_register.py | 95 +++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 tests/compiler/test_register.py diff --git a/tests/compiler/test_register.py b/tests/compiler/test_register.py new file mode 100644 index 000000000..02a4e5256 --- /dev/null +++ b/tests/compiler/test_register.py @@ -0,0 +1,95 @@ +"""This file tests the register_workflow function.""" +from __future__ import annotations + +from itertools import combinations + +from random import choice + +from bqskit.compiler import compile +from bqskit.compiler.machine import MachineModel +from bqskit.compiler.register import workflow_registry +from bqskit.compiler.register import register_workflow +from bqskit.compiler.workflow import Workflow + +from bqskit.ir import Circuit +from bqskit.ir import Gate +from bqskit.ir.gates import CZGate +from bqskit.ir.gates import HGate +from bqskit.ir.gates import RZGate + +from bqskit.passes import QuickPartitioner +from bqskit.passes import ScanningGateRemovalPass + + +def machine_match(mach_a: MachineModel, mach_b: MachineModel) -> bool: + if mach_a.num_qudits != mach_b.num_qudits: + return False + if mach_a.radixes != mach_b.radixes: + return False + if mach_a.coupling_graph != mach_b.coupling_graph: + return False + if mach_a.gate_set != mach_b.gate_set: + return False + return True + + +def workflow_match(workflow_a: Workflow, workflow_b: Workflow) -> bool: + if len(workflow_a) != len(workflow_b): + return False + for a, b in zip(workflow_a, workflow_b): + if a.name != b.name: + return False + return True + + +def simple_circuit(num_qudits: int, gate_set: list[Gate]) -> Circuit: + circ = Circuit(num_qudits) + gate = choice(gate_set) + if gate.num_qudits == 1: + loc = choice(range(num_qudits)) + else: + loc = choice(list(combinations(range(num_qudits), 2))) + gate_inv = gate.get_inverse() + circ.append_gate(gate, loc) + circ.append_gate(gate_inv, loc) + return circ + + +class TestRegisterWorkflow: + + def test_register_workflow(self) -> None: + assert workflow_registry == {} + machine = MachineModel(3) + workflow = [QuickPartitioner(), ScanningGateRemovalPass()] + register_workflow(machine, workflow) + assert machine in workflow_registry + assert 1 in workflow_registry[machine] + assert workflow_match(workflow_registry[machine][1], workflow) + + def test_custom_compile_machine(self) -> None: + gateset = [CZGate(), HGate(), RZGate()] + num_qudits = 3 + 
machine = MachineModel(num_qudits, gate_set=gateset) + workflow = [QuickPartitioner(2)] + register_workflow(machine, workflow) + circuit = simple_circuit(num_qudits, gateset) + result = compile(circuit, machine) + assert result.get_unitary() == circuit.get_unitary() + assert result.num_operations > 0 + assert result.gate_counts != circuit.gate_counts + result.unfold_all() + assert result.gate_counts == circuit.gate_counts + + def test_custom_compile_gateset(self) -> None: + gateset = [CZGate(), HGate(), RZGate()] + num_qudits = 3 + machine = MachineModel(num_qudits, gate_set=gateset) + workflow = [QuickPartitioner(2)] + register_workflow(gateset, workflow) + circuit = simple_circuit(num_qudits, gateset) + result = compile(circuit, machine) + assert result.get_unitary() == circuit.get_unitary() + assert result.num_operations > 0 + assert result.gate_counts != circuit.gate_counts + result.unfold_all() + assert result.gate_counts == circuit.gate_counts \ No newline at end of file From f007e6bf01db410a8c3309492ce4eff5e09e8efe Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:40:32 -0700 Subject: [PATCH 123/197] pre-commit --- bqskit/compiler/gateset.py | 2 +- bqskit/compiler/register.py | 13 ++++++++----- tests/compiler/test_register.py | 19 ++++++++++++------- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/bqskit/compiler/gateset.py b/bqskit/compiler/gateset.py index 63672e768..05f735df5 100644 --- a/bqskit/compiler/gateset.py +++ b/bqskit/compiler/gateset.py @@ -230,7 +230,7 @@ def __str__(self) -> str: def __repr__(self) -> str: """Detailed representation of the GateSet.""" return self._gates.__repr__().replace('frozenset', 'GateSet') - + def __hash__(self) -> int: """Hash of the GateSet.""" return self.__repr__().__hash__() diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py index 37b6b4aec..c655a5821 100644 --- a/bqskit/compiler/register.py +++ b/bqskit/compiler/register.py @@ -19,10 +19,10 @@ from bqskit.compiler.basepass import BasePass from bqskit.compiler.gateset import GateSet +from bqskit.compiler.gateset import GateSetLike from bqskit.compiler.machine import MachineModel from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike - from bqskit.ir.gate import Gate _logger = logging.getLogger(__name__) @@ -32,7 +32,7 @@ def register_workflow( - machine_or_gateset: MachineModel | GateSet, + machine_or_gateset: MachineModel | GateSetLike, workflow: WorkflowLike, optimization_level: int = 1, ) -> None: @@ -40,9 +40,10 @@ def register_workflow( Register a workflow for a given machine model. Args: - machine_or_gateset (MachineModel | GateSet): A MachineModel or GateSet - to register the workflow for. If a circuit is compiled targeting - this machine or gate set, the registered workflow will be used. + machine_or_gateset (MachineModel | GateSetLike): A MachineModel or + GateSetLike to register the workflow under. If a circuit is + compiled targeting this machine or gate set, the registered + workflow will be used. workflow (list[BasePass]): The workflow or list of passes that whill be executed if the MachineModel in a call to `compile` matches @@ -59,6 +60,8 @@ def register_workflow( TypeError: If `workflow` is not a list of BasePass objects. 
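    Example:
        With gate sequences accepted, a workflow can also be keyed by a
        gate set alone; this sketch mirrors the new tests (the
        particular gates and block size are illustrative):

            from bqskit.compiler.machine import MachineModel
            from bqskit.compiler.register import register_workflow
            from bqskit.ir.gates import CZGate
            from bqskit.ir.gates import HGate
            from bqskit.ir.gates import RZGate
            from bqskit.passes import QuickPartitioner

            gateset = [CZGate(), HGate(), RZGate()]
            register_workflow(gateset, [QuickPartitioner(2)])

            # Any MachineModel built over an equal gate set now resolves
            # to this workflow when passed to compile().
            model = MachineModel(3, gate_set=gateset)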
""" if not isinstance(machine_or_gateset, MachineModel): + if isinstance(machine_or_gateset, Gate): + machine_or_gateset = [machine_or_gateset] if all(isinstance(g, Gate) for g in machine_or_gateset): machine_or_gateset = GateSet(machine_or_gateset) else: diff --git a/tests/compiler/test_register.py b/tests/compiler/test_register.py index 02a4e5256..414d90a06 100644 --- a/tests/compiler/test_register.py +++ b/tests/compiler/test_register.py @@ -2,21 +2,19 @@ from __future__ import annotations from itertools import combinations - from random import choice from bqskit.compiler import compile from bqskit.compiler.machine import MachineModel -from bqskit.compiler.register import workflow_registry from bqskit.compiler.register import register_workflow +from bqskit.compiler.register import workflow_registry from bqskit.compiler.workflow import Workflow - +from bqskit.compiler.workflow import WorkflowLike from bqskit.ir import Circuit from bqskit.ir import Gate from bqskit.ir.gates import CZGate from bqskit.ir.gates import HGate from bqskit.ir.gates import RZGate - from bqskit.passes import QuickPartitioner from bqskit.passes import ScanningGateRemovalPass @@ -33,7 +31,14 @@ def machine_match(mach_a: MachineModel, mach_b: MachineModel) -> bool: return True -def workflow_match(workflow_a: Workflow, workflow_b: Workflow) -> bool: +def workflow_match( + workflow_a: WorkflowLike, + workflow_b: WorkflowLike, +) -> bool: + if not isinstance(workflow_a, Workflow): + workflow_a = Workflow(workflow_a) + if not isinstance(workflow_b, Workflow): + workflow_b = Workflow(workflow_b) if len(workflow_a) != len(workflow_b): return False for a, b in zip(workflow_a, workflow_b): @@ -48,7 +53,7 @@ def simple_circuit(num_qudits: int, gate_set: list[Gate]) -> Circuit: if gate.num_qudits == 1: loc = choice(range(num_qudits)) else: - loc = choice(list(combinations(range(num_qudits), 2))) + loc = choice(list(combinations(range(num_qudits), 2))) # type: ignore gate_inv = gate.get_inverse() circ.append_gate(gate, loc) circ.append_gate(gate_inv, loc) @@ -92,4 +97,4 @@ def test_custom_compile_gateset(self) -> None: assert result.num_operations > 0 assert result.gate_counts != circuit.gate_counts result.unfold_all() - assert result.gate_counts == circuit.gate_counts \ No newline at end of file + assert result.gate_counts == circuit.gate_counts From 0a33d182fecade311ee4fcfb4dae84aee94ba5a5 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 16 Aug 2024 15:50:33 -0700 Subject: [PATCH 124/197] Added test for optimization_level=2 --- tests/compiler/test_register.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/compiler/test_register.py b/tests/compiler/test_register.py index 414d90a06..b3131ce99 100644 --- a/tests/compiler/test_register.py +++ b/tests/compiler/test_register.py @@ -15,6 +15,8 @@ from bqskit.ir.gates import CZGate from bqskit.ir.gates import HGate from bqskit.ir.gates import RZGate +from bqskit.ir.gates import U3Gate +from bqskit.passes import QSearchSynthesisPass from bqskit.passes import QuickPartitioner from bqskit.passes import ScanningGateRemovalPass @@ -98,3 +100,15 @@ def test_custom_compile_gateset(self) -> None: assert result.gate_counts != circuit.gate_counts result.unfold_all() assert result.gate_counts == circuit.gate_counts + + def test_custom_opt_level(self) -> None: + gateset = [CZGate(), HGate(), RZGate()] + num_qudits = 3 + machine = MachineModel(num_qudits, gate_set=gateset) + workflow = [QSearchSynthesisPass()] + register_workflow(gateset, workflow, 2) + circuit = 
simple_circuit(num_qudits, gateset) + result = compile(circuit, machine, optimization_level=2) + assert result.get_unitary() == circuit.get_unitary() + assert result.gate_counts != circuit.gate_counts + assert U3Gate() in result.gate_set From a8b5e25521b143a7fe952b91e6640f7b3bfa0836 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 19 Aug 2024 20:45:41 -0700 Subject: [PATCH 125/197] Renamed workflow_registry -> _workflow_registry --- bqskit/compiler/compile.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index f5b6ca16c..941f29060 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -16,7 +16,7 @@ from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData -from bqskit.compiler.register import workflow_registry +from bqskit.compiler.register import _workflow_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit @@ -673,15 +673,15 @@ def build_workflow( # Use a registered workflow if model is found in the registry for a given # optimization_level - for machine_or_gateset in workflow_registry: + for machine_or_gateset in _workflow_registry: if isinstance(machine_or_gateset, GateSet): gate_set = machine_or_gateset else: gate_set = machine_or_gateset.gate_set gs_match = gate_set == model.gate_set - ol_found = optimization_level in workflow_registry[machine_or_gateset] + ol_found = optimization_level in _workflow_registry[machine_or_gateset] if gs_match and ol_found: - return workflow_registry[machine_or_gateset][optimization_level] + return _workflow_registry[machine_or_gateset][optimization_level] if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: From ae23a051a6877e5151f45c53583f86369e419ef4 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 19 Aug 2024 20:46:00 -0700 Subject: [PATCH 126/197] Added clear_register --- bqskit/compiler/register.py | 26 ++++++++++++++++++-------- tests/compiler/test_register.py | 33 ++++++++++++++++++++++++--------- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/bqskit/compiler/register.py b/bqskit/compiler/register.py index c655a5821..8668cec79 100644 --- a/bqskit/compiler/register.py +++ b/bqskit/compiler/register.py @@ -1,8 +1,8 @@ """ -The workflow_registry enables MachineModel or GateSet specific workflows to be +The _workflow_registry enables MachineModel or GateSet specific workflows to be registered for used in the `bqskit.compile` method. -The workflow_registry maps MachineModels a dictionary of Workflows which +The _workflow_registry maps MachineModels a dictionary of Workflows which are indexed by optimization level. This object should not be accessed directly by the user, but instead through the `register_workflow` function. @@ -28,7 +28,7 @@ _logger = logging.getLogger(__name__) -workflow_registry: dict[MachineModel | GateSet, dict[int, Workflow]] = {} +_workflow_registry: dict[MachineModel | GateSet, dict[int, Workflow]] = {} def register_workflow( @@ -77,13 +77,23 @@ def register_workflow( m += f'{type(p)}.' 
raise TypeError(m) - global workflow_registry + global _workflow_registry new_workflow = {optimization_level: workflow} - if machine_or_gateset in workflow_registry: - if optimization_level in workflow_registry[machine_or_gateset]: + if machine_or_gateset in _workflow_registry: + if optimization_level in _workflow_registry[machine_or_gateset]: m = f'Overwritting workflow for {machine_or_gateset} ' m += f'at level {optimization_level}.' _logger.warn(m) - workflow_registry[machine_or_gateset].update(new_workflow) + _workflow_registry[machine_or_gateset].update(new_workflow) else: - workflow_registry[machine_or_gateset] = new_workflow + _workflow_registry[machine_or_gateset] = new_workflow + + +def clear_registry() -> None: + """ + Clear the workflow registry. + + This will remove all registered workflows from the registry. + """ + global _workflow_registry + _workflow_registry.clear() diff --git a/tests/compiler/test_register.py b/tests/compiler/test_register.py index b3131ce99..31e544d4c 100644 --- a/tests/compiler/test_register.py +++ b/tests/compiler/test_register.py @@ -4,10 +4,14 @@ from itertools import combinations from random import choice +import pytest +from numpy import allclose + from bqskit.compiler import compile from bqskit.compiler.machine import MachineModel +from bqskit.compiler.register import _workflow_registry +from bqskit.compiler.register import clear_registry from bqskit.compiler.register import register_workflow -from bqskit.compiler.register import workflow_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir import Circuit @@ -33,6 +37,10 @@ def machine_match(mach_a: MachineModel, mach_b: MachineModel) -> bool: return True +def unitary_match(unit_a: Circuit, unit_b: Circuit) -> bool: + return allclose(unit_a.get_unitary(), unit_b.get_unitary(), atol=1e-5) + + def workflow_match( workflow_a: WorkflowLike, workflow_b: WorkflowLike, @@ -64,14 +72,21 @@ def simple_circuit(num_qudits: int, gate_set: list[Gate]) -> Circuit: class TestRegisterWorkflow: + @pytest.fixture(autouse=True) + def setup(self) -> None: + # _workflow_registry.clear() + clear_registry() + def test_register_workflow(self) -> None: - assert workflow_registry == {} - machine = MachineModel(3) + assert _workflow_registry == {} + gateset = [CZGate(), HGate(), RZGate()] + num_qudits = 3 + machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(), ScanningGateRemovalPass()] register_workflow(machine, workflow) - assert machine in workflow_registry - assert 1 in workflow_registry[machine] - assert workflow_match(workflow_registry[machine][1], workflow) + assert machine in _workflow_registry + assert 1 in _workflow_registry[machine] + assert workflow_match(_workflow_registry[machine][1], workflow) def test_custom_compile_machine(self) -> None: gateset = [CZGate(), HGate(), RZGate()] @@ -81,7 +96,7 @@ def test_custom_compile_machine(self) -> None: register_workflow(machine, workflow) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine) - assert result.get_unitary() == circuit.get_unitary() + assert unitary_match(result, circuit) assert result.num_operations > 0 assert result.gate_counts != circuit.gate_counts result.unfold_all() @@ -95,7 +110,7 @@ def test_custom_compile_gateset(self) -> None: register_workflow(gateset, workflow) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine) - assert result.get_unitary() == circuit.get_unitary() + assert 
unitary_match(result, circuit) assert result.num_operations > 0 assert result.gate_counts != circuit.gate_counts result.unfold_all() @@ -109,6 +124,6 @@ def test_custom_opt_level(self) -> None: register_workflow(gateset, workflow, 2) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine, optimization_level=2) - assert result.get_unitary() == circuit.get_unitary() + assert unitary_match(result, circuit) assert result.gate_counts != circuit.gate_counts assert U3Gate() in result.gate_set From 6e59f4b1eb366285937aaaa54d1f4eac8f67f91e Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 19 Aug 2024 20:48:33 -0700 Subject: [PATCH 127/197] register -> registry --- bqskit/compiler/compile.py | 2 +- bqskit/compiler/{register.py => registry.py} | 0 tests/compiler/{test_register.py => test_registry.py} | 6 +++--- 3 files changed, 4 insertions(+), 4 deletions(-) rename bqskit/compiler/{register.py => registry.py} (100%) rename tests/compiler/{test_register.py => test_registry.py} (96%) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 941f29060..c6b9f8bab 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -16,7 +16,7 @@ from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData -from bqskit.compiler.register import _workflow_registry +from bqskit.compiler.registry import _workflow_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit diff --git a/bqskit/compiler/register.py b/bqskit/compiler/registry.py similarity index 100% rename from bqskit/compiler/register.py rename to bqskit/compiler/registry.py diff --git a/tests/compiler/test_register.py b/tests/compiler/test_registry.py similarity index 96% rename from tests/compiler/test_register.py rename to tests/compiler/test_registry.py index 31e544d4c..ca037bca9 100644 --- a/tests/compiler/test_register.py +++ b/tests/compiler/test_registry.py @@ -9,9 +9,9 @@ from bqskit.compiler import compile from bqskit.compiler.machine import MachineModel -from bqskit.compiler.register import _workflow_registry -from bqskit.compiler.register import clear_registry -from bqskit.compiler.register import register_workflow +from bqskit.compiler.registry import _workflow_registry +from bqskit.compiler.registry import clear_registry +from bqskit.compiler.registry import register_workflow from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir import Circuit From 06fd2894fd6d428a1a502dfb5a5b79b8d3e6ea31 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Mon, 26 Aug 2024 18:52:39 -0700 Subject: [PATCH 128/197] Created DiscreteLayerGenerator --- bqskit/passes/__init__.py | 3 + bqskit/passes/search/generators/__init__.py | 2 + bqskit/passes/search/generators/discrete.py | 213 ++++++++++++++++++ .../passes/search/generators/test_discrete.py | 51 +++++ 4 files changed, 269 insertions(+) create mode 100644 bqskit/passes/search/generators/discrete.py create mode 100644 tests/passes/search/generators/test_discrete.py diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index 9a386f6fb..ef8034be9 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -195,6 +195,7 @@ :toctree: autogen :recursive: + DiscreteLayerGenerator FourParamGenerator MiddleOutLayerGenerator SeedLayerGenerator @@ -274,6 +275,7 @@ from bqskit.passes.rules.zxzxz import 
ZXZXZDecomposition from bqskit.passes.search.frontier import Frontier from bqskit.passes.search.generator import LayerGenerator +from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator from bqskit.passes.search.generators.fourparam import FourParamGenerator from bqskit.passes.search.generators.middleout import MiddleOutLayerGenerator from bqskit.passes.search.generators.seed import SeedLayerGenerator @@ -341,6 +343,7 @@ 'DijkstraHeuristic', 'Frontier', 'LayerGenerator', + 'DiscreteLayerGenerator', 'HeuristicFunction', 'SeedLayerGenerator', 'BlockConversionPass', diff --git a/bqskit/passes/search/generators/__init__.py b/bqskit/passes/search/generators/__init__.py index aec7fab2e..308667cea 100644 --- a/bqskit/passes/search/generators/__init__.py +++ b/bqskit/passes/search/generators/__init__.py @@ -1,6 +1,7 @@ """This package contains LayerGenerator definitions.""" from __future__ import annotations +from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator from bqskit.passes.search.generators.fourparam import FourParamGenerator from bqskit.passes.search.generators.middleout import MiddleOutLayerGenerator from bqskit.passes.search.generators.seed import SeedLayerGenerator @@ -10,6 +11,7 @@ from bqskit.passes.search.generators.wide import WideLayerGenerator __all__ = [ + 'DiscreteLayerGenerator', 'FourParamGenerator', 'MiddleOutLayerGenerator', 'SeedLayerGenerator', diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py new file mode 100644 index 000000000..6b8d3f10a --- /dev/null +++ b/bqskit/passes/search/generators/discrete.py @@ -0,0 +1,213 @@ +"""This module implements the DiscreteLayerGenerator class.""" +from __future__ import annotations + +import logging +from typing import Sequence + +from bqskit.compiler.passdata import PassData +from bqskit.ir.circuit import Circuit +from bqskit.ir.gate import Gate +from bqskit.ir.gates import CNOTGate +from bqskit.ir.gates import HGate +from bqskit.ir.gates import TGate +from bqskit.ir.gates.parameterized.pauliz import PauliZGate +from bqskit.ir.operation import Operation +from bqskit.passes.search.generator import LayerGenerator +from bqskit.qis.state.state import StateVector +from bqskit.qis.state.system import StateSystem +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.utils.typing import is_sequence + + +_logger = logging.getLogger(__name__) + + +class DiscreteLayerGenerator(LayerGenerator): + """ + The DiscreteLayerGenerator class. + + Expands circuits using only discrete gates. + """ + + def __init__( + self, + gateset: Sequence[Gate] = [HGate(), TGate(), CNOTGate()], + double_headed: bool = False, + ) -> None: + """ + Construct a DiscreteLayerGenerator. + + Args: + gateset (Sequence[Gate]): A sequence of gates that can be used + in the output circuit. These must be non-parameterized gates. + (Default: [HGate(), TGate(), CNOTGate()]) + + double_headed (bool): If True, successors will be generated by + both appending and prepending gates. (Default: False) + + Raises: + TypeError: If the gateset is not a sequence. + + TypeError: If the gateset contains a parameterized gate. + + TypeError: If the radices of gates are different. + """ + if not is_sequence(gateset): + m = f'Expected sequence of gates, got {type(gateset)}.' + raise TypeError(m) + + radix = gateset[0].radixes[0] + for gate in gateset: + if gate.num_params > 0: + m = 'Expected gate for constant gates, got parameterized' + m += f' {gate} gate.' 
+ raise TypeError(m) + for rad in gate.radixes: + if rad != radix: + m = f'Radix mismatch on gate: {gate}. ' + m += f'Expected {radix}, got {rad}.' + raise TypeError(m) + self.gateset = gateset + self.double_headed = double_headed + + def gen_initial_layer( + self, + target: UnitaryMatrix | StateVector | StateSystem, + data: PassData, + ) -> Circuit: + """ + Generate the initial layer, see LayerGenerator for more. + + Raises: + ValueError: If `target` has a radix mismatch with + `self.initial_layer_gate`. + """ + + if not isinstance(target, (UnitaryMatrix, StateVector, StateSystem)): + m = f'Expected unitary or state, got {type(target)}.' + raise TypeError(m) + + for radix in target.radixes: + if radix != self.gateset[0].radixes[0]: + m = 'Radix mismatch between target and gateset.' + raise ValueError(m) + + init_circuit = Circuit(target.num_qudits, target.radixes) + + if self.double_headed: + n = target.num_qudits + span = list(range(n)) + init_circuit.append_gate(PauliZGate(n), span) + + return init_circuit + + def cancels_something( + self, + circuit: Circuit, + gate: Gate, + location: tuple[int, ...], + ) -> bool: + """Ensure applying gate at location does not cancel a previous gate.""" + last_cycle = circuit.num_cycles - 1 + try: + op = circuit.get_operation((last_cycle, location[0])) + op_gate, op_location = op.gate, op.location + if op_location == location and op_gate.get_inverse() == gate: + return True + return False + except IndexError: + return False + + def count_repeats( + self, + circuit: Circuit, + gate: Gate, + qudit: int, + ) -> int: + """Count the number of times the last gate is repeated on qudit.""" + count = 0 + for cycle in reversed(range(circuit.num_cycles)): + try: + op = circuit.get_operation((cycle, qudit)) + if op.gate == gate: + count += 1 + else: + return count + except IndexError: + continue + return count + + def gen_successors(self, circuit: Circuit, data: PassData) -> list[Circuit]: + """ + Generate the successors of a circuit node. + + Raises: + ValueError: If circuit is a single-qudit circuit. 
+ """ + if not isinstance(circuit, Circuit): + raise TypeError(f'Expected circuit, got {type(circuit)}.') + + if circuit.num_qudits < 2: + raise ValueError('Cannot expand a single-qudit circuit.') + + # Get the coupling graph + coupling_graph = data.connectivity + + # Generate successors + successors = [] + hashes = set() + singles = [gate for gate in self.gateset if gate.num_qudits == 1] + multis = [gate for gate in self.gateset if gate.num_qudits > 1] + + for gate in singles: + for qudit in range(circuit.num_qudits): + if self.cancels_something(circuit, gate, (qudit,)): + continue + if isinstance(gate, TGate): + if self.count_repeats(circuit, TGate(), qudit) >= 7: + continue + successor = circuit.copy() + successor.append_gate(gate, [qudit]) + + h = hash_circuit_structure(successor) + if h not in hashes: + successors.append(successor) + hashes.add(h) + + if self.double_headed: + successor = circuit.copy() + op = Operation(gate, [qudit]) + successor.insert(0, op) + h = hash_circuit_structure(successor) + if h not in hashes: + successors.append(successor) + hashes.add(h) + + for gate in multis: + for edge in coupling_graph: + if self.cancels_something(circuit, gate, edge): + continue + successor = circuit.copy() + successor.append_gate(gate, edge) + h = hash_circuit_structure(successor) + if h not in hashes: + successors.append(successor) + hashes.add(h) + + if self.double_headed: + successor = circuit.copy() + op = Operation(gate, edge) + successor.insert(0, op) + h = hash_circuit_structure(successor) + if h not in hashes: + successors.append(successor) + hashes.add(h) + + return successors + + +def hash_circuit_structure(circuit: Circuit) -> int: + hashes = [] + for op in circuit: + hashes.append(hash(op)) + return hash(tuple(hashes)) diff --git a/tests/passes/search/generators/test_discrete.py b/tests/passes/search/generators/test_discrete.py new file mode 100644 index 000000000..b60473729 --- /dev/null +++ b/tests/passes/search/generators/test_discrete.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from random import randint + +from bqskit.compiler.passdata import PassData +from bqskit.ir.circuit import Circuit +from bqskit.ir.gates import CNOTGate +from bqskit.ir.gates import HGate +from bqskit.ir.gates import TGate +from bqskit.passes.search.generators import DiscreteLayerGenerator + + +class TestDiscreteLayerGenerator: + + def test_gate_set(self) -> None: + gates = [HGate(), CNOTGate(), TGate()] + generator = DiscreteLayerGenerator() + assert all(g in generator.gateset for g in gates) + + def test_double_headed(self) -> None: + single_gen = DiscreteLayerGenerator(double_headed=False) + double_gen = DiscreteLayerGenerator(double_headed=True) + base = Circuit(4) + single_sucs = single_gen.gen_successors(base, PassData(base)) + double_sucs = double_gen.gen_successors(base, PassData(base)) + assert len(single_sucs) == len(double_sucs) + + base = Circuit(2) + base.append_gate(CNOTGate(), (0, 1)) + single_sucs = single_gen.gen_successors(base, PassData(base)) + double_sucs = double_gen.gen_successors(base, PassData(base)) + assert len(single_sucs) < len(double_sucs) + assert all(c in double_sucs for c in single_sucs) + + def test_cancels_something(self) -> None: + gen = DiscreteLayerGenerator() + base = Circuit(2) + base.append_gate(HGate(), (0,)) + base.append_gate(TGate(), (0,)) + base.append_gate(HGate(), (0,)) + assert gen.cancels_something(base, HGate(), (0,)) + assert not gen.cancels_something(base, HGate(), (1,)) + assert not gen.cancels_something(base, TGate(), (0,)) + 
+ def test_count_repeats(self) -> None: + num_repeats = randint(1, 50) + c = Circuit(1) + for _ in range(num_repeats): + c.append_gate(HGate(), (0,)) + gen = DiscreteLayerGenerator() + assert gen.count_repeats(c, HGate(), 0) == num_repeats From 7246e371b5bcf169ccf4d814836825f21ed0e1d2 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 27 Aug 2024 11:07:18 -0400 Subject: [PATCH 129/197] Update --- bqskit/runtime/worker.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 254d686f5..1684f7dbc 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -378,7 +378,11 @@ def _handle_cancel(self, addr: RuntimeAddress) -> None: if not t.is_descendant_of(addr) ] - def _handle_communicate(self, addrs: list[RuntimeAddress], msg: Any) -> None: + def _handle_communicate( + self, + addrs: list[RuntimeAddress], + msg: Any, + ) -> None: for task_addr in addrs: if task_addr not in self._tasks: continue @@ -763,7 +767,7 @@ def start_worker( set_blas_thread_counts(num_blas_threads) # Enforce no default logging - logging.lastResort = logging.NullHandler() # type: ignore # typeshed#11770 + logging.lastResort = logging.NullHandler() logging.getLogger().handlers.clear() # Pin worker to cpu From 1242660754b4d4d7a97ed18b9cd4aa145bdab62a Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 27 Aug 2024 16:52:49 -0700 Subject: [PATCH 130/197] Reorder imports --- bqskit/passes/synthesis/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/passes/synthesis/__init__.py b/bqskit/passes/synthesis/__init__.py index ca3d4896a..6ebe9d814 100644 --- a/bqskit/passes/synthesis/__init__.py +++ b/bqskit/passes/synthesis/__init__.py @@ -1,7 +1,6 @@ """This package implements synthesis passes and synthesis related classes.""" from __future__ import annotations -from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -9,9 +8,9 @@ from bqskit.passes.synthesis.qsearch import QSearchSynthesisPass from bqskit.passes.synthesis.synthesis import SynthesisPass from bqskit.passes.synthesis.target import SetTargetPass +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass __all__ = [ - 'WalshDiagonalSynthesisPass', 'LEAPSynthesisPass', 'QFASTDecompositionPass', 'QPredictDecompositionPass', @@ -19,4 +18,5 @@ 'SynthesisPass', 'SetTargetPass', 'PermutationAwareSynthesisPass', + 'WalshDiagonalSynthesisPass', ] From be5f07bb9578354de7fd0deacb9637cccc830b67 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 27 Aug 2024 17:10:03 -0700 Subject: [PATCH 131/197] add_successor function --- bqskit/passes/search/generators/discrete.py | 37 +++++++++------------ 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 6b8d3f10a..61abb12d4 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -159,6 +159,12 @@ def gen_successors(self, circuit: Circuit, data: PassData) -> list[Circuit]: singles = [gate for gate in self.gateset if gate.num_qudits == 1] multis = [gate for gate in self.gateset if gate.num_qudits > 1] + def add_to_successors(circuit: Circuit) -> None: + h = self.hash_circuit_structure(circuit) + if h not in hashes: + 
successors.append(circuit) + hashes.add(h) + for gate in singles: for qudit in range(circuit.num_qudits): if self.cancels_something(circuit, gate, (qudit,)): @@ -169,19 +175,13 @@ def gen_successors(self, circuit: Circuit, data: PassData) -> list[Circuit]: successor = circuit.copy() successor.append_gate(gate, [qudit]) - h = hash_circuit_structure(successor) - if h not in hashes: - successors.append(successor) - hashes.add(h) + add_to_successors(successor) if self.double_headed: successor = circuit.copy() op = Operation(gate, [qudit]) successor.insert(0, op) - h = hash_circuit_structure(successor) - if h not in hashes: - successors.append(successor) - hashes.add(h) + add_to_successors(successor) for gate in multis: for edge in coupling_graph: @@ -189,25 +189,18 @@ def gen_successors(self, circuit: Circuit, data: PassData) -> list[Circuit]: continue successor = circuit.copy() successor.append_gate(gate, edge) - h = hash_circuit_structure(successor) - if h not in hashes: - successors.append(successor) - hashes.add(h) + add_to_successors(successor) if self.double_headed: successor = circuit.copy() op = Operation(gate, edge) successor.insert(0, op) - h = hash_circuit_structure(successor) - if h not in hashes: - successors.append(successor) - hashes.add(h) + add_to_successors(successor) return successors - -def hash_circuit_structure(circuit: Circuit) -> int: - hashes = [] - for op in circuit: - hashes.append(hash(op)) - return hash(tuple(hashes)) + def hash_circuit_structure(self, circuit: Circuit) -> int: + hashes = [] + for op in circuit: + hashes.append(hash(op)) + return hash(tuple(hashes)) From f8469271ca862e3508bb84b7748bf1dec0397351 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 27 Aug 2024 17:10:30 -0700 Subject: [PATCH 132/197] Explicit import of DiscreteLayerGenerator --- tests/passes/search/generators/test_discrete.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/passes/search/generators/test_discrete.py b/tests/passes/search/generators/test_discrete.py index b60473729..dedaaa18a 100644 --- a/tests/passes/search/generators/test_discrete.py +++ b/tests/passes/search/generators/test_discrete.py @@ -7,7 +7,7 @@ from bqskit.ir.gates import CNOTGate from bqskit.ir.gates import HGate from bqskit.ir.gates import TGate -from bqskit.passes.search.generators import DiscreteLayerGenerator +from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator class TestDiscreteLayerGenerator: From 3bf2954e35772bf0ca9545ac2cfe0c4d157f5a3f Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 27 Aug 2024 17:10:48 -0700 Subject: [PATCH 133/197] Debugging sphinx docs generation --- bqskit/passes/__init__.py | 7 ++++--- bqskit/passes/search/generators/__init__.py | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index ef8034be9..b76c2cdf1 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -28,12 +28,12 @@ :toctree: autogen :recursive: - DiagonalSynthesisPass LEAPSynthesisPass QSearchSynthesisPass QFASTDecompositionPass QPredictDecompositionPass SynthesisPass + WalshDiagonalSynthesisPass .. 
rubric:: Processing Passes @@ -287,6 +287,7 @@ from bqskit.passes.search.heuristics.astar import AStarHeuristic from bqskit.passes.search.heuristics.dijkstra import DijkstraHeuristic from bqskit.passes.search.heuristics.greedy import GreedyHeuristic +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -324,7 +325,7 @@ 'ScanPartitioner', 'QuickPartitioner', 'SynthesisPass', - 'DiagonalSynthesisPass', + 'WalshDiagonalSynthesisPass', 'LEAPSynthesisPass', 'QSearchSynthesisPass', 'QFASTDecompositionPass', @@ -337,13 +338,13 @@ 'ToU3Pass', 'ScanningGateRemovalPass', 'TreeScanningGateRemovalPass', + 'DiscreteLayerGenerator', 'SimpleLayerGenerator', 'AStarHeuristic', 'GreedyHeuristic', 'DijkstraHeuristic', 'Frontier', 'LayerGenerator', - 'DiscreteLayerGenerator', 'HeuristicFunction', 'SeedLayerGenerator', 'BlockConversionPass', diff --git a/bqskit/passes/search/generators/__init__.py b/bqskit/passes/search/generators/__init__.py index 308667cea..2453b7c84 100644 --- a/bqskit/passes/search/generators/__init__.py +++ b/bqskit/passes/search/generators/__init__.py @@ -1,7 +1,6 @@ """This package contains LayerGenerator definitions.""" from __future__ import annotations -from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator from bqskit.passes.search.generators.fourparam import FourParamGenerator from bqskit.passes.search.generators.middleout import MiddleOutLayerGenerator from bqskit.passes.search.generators.seed import SeedLayerGenerator @@ -9,9 +8,9 @@ from bqskit.passes.search.generators.single import SingleQuditLayerGenerator from bqskit.passes.search.generators.stair import StairLayerGenerator from bqskit.passes.search.generators.wide import WideLayerGenerator +from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator __all__ = [ - 'DiscreteLayerGenerator', 'FourParamGenerator', 'MiddleOutLayerGenerator', 'SeedLayerGenerator', @@ -19,4 +18,5 @@ 'SingleQuditLayerGenerator', 'StairLayerGenerator', 'WideLayerGenerator', + 'DiscreteLayerGenerator', ] From 47490492add1e5f40ad764bdb710cbb8b8d3d29c Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 28 Aug 2024 11:28:18 -0400 Subject: [PATCH 134/197] Fixed failing test from merge --- bqskit/passes/control/paralleldo.py | 4 ++-- bqskit/runtime/task.py | 6 +++++- tests/passes/control/test_paralleldo.py | 8 ++++---- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/bqskit/passes/control/paralleldo.py b/bqskit/passes/control/paralleldo.py index 42b9bbeee..a95168058 100644 --- a/bqskit/passes/control/paralleldo.py +++ b/bqskit/passes/control/paralleldo.py @@ -34,7 +34,7 @@ def __init__( self, pass_sequences: Iterable[WorkflowLike], less_than: Callable[[Circuit, Circuit], bool], - pick_fisrt: bool = False, + pick_first: bool = False, ) -> None: """ Construct a ParallelDo. 
@@ -63,7 +63,7 @@ def __init__( self.workflows = [Workflow(p) for p in pass_sequences] self.less_than = less_than - self.pick_first = pick_fisrt + self.pick_first = pick_first if len(self.workflows) == 0: raise ValueError('Must specify at least one workflow.') diff --git a/bqskit/runtime/task.py b/bqskit/runtime/task.py index 6a07d174a..d8cef7855 100644 --- a/bqskit/runtime/task.py +++ b/bqskit/runtime/task.py @@ -135,7 +135,11 @@ def cancel(self) -> None: # it is likely a blanket try/accept catching the # error used to stop the coroutine, preventing # it from stopping correctly. - self.coro.close() + try: + self.coro.close() + except ValueError: + # Coroutine is running and cannot be closed. + pass else: raise RuntimeError('Task was cancelled with None coroutine.') diff --git a/tests/passes/control/test_paralleldo.py b/tests/passes/control/test_paralleldo.py index 2d69b5e70..91a5c4f72 100644 --- a/tests/passes/control/test_paralleldo.py +++ b/tests/passes/control/test_paralleldo.py @@ -38,11 +38,11 @@ async def run(self, circuit: Circuit, data: PassData) -> None: data['key'] = '1' -class Sleep3Pass(BasePass): +class Sleep9Pass(BasePass): async def run(self, circuit: Circuit, data: PassData) -> None: circuit.append_gate(ZGate(), 0) - time.sleep(0.3) - data['key'] = '3' + time.sleep(0.9) + data['key'] = '9' def pick_z(c1: Circuit, c2: Circuit) -> bool: @@ -66,7 +66,7 @@ def test_parallel_do_no_passes() -> None: def test_parallel_do_pick_first(compiler: Compiler) -> None: - passes: list[list[BasePass]] = [[Sleep3Pass()], [Sleep1Pass()]] + passes: list[list[BasePass]] = [[Sleep9Pass()], [Sleep1Pass()]] pd_pass = ParallelDo(passes, pick_z, True) _, data = compiler.compile(Circuit(1), pd_pass, True) assert data['key'] == '1' From 92364bde212c4ee7fe81ce47590a2f638546c355 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 08:36:53 -0700 Subject: [PATCH 135/197] Fixed spelling error --- bqskit/compiler/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 8668cec79..50cb00523 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -45,7 +45,7 @@ def register_workflow( compiled targeting this machine or gate set, the registered workflow will be used. - workflow (list[BasePass]): The workflow or list of passes that whill + workflow (list[BasePass]): The workflow or list of passes that will be executed if the MachineModel in a call to `compile` matches `machine`. If `machine` is already registered, a warning will be logged. 
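Context for the registry patches above (125/197 through 135/197): register_workflow is the hook that compile() consults before falling back to a built-in workflow. What follows is a minimal usage sketch, not part of the patch series, written against the API as it stands at this point (module already renamed to bqskit.compiler.registry in PATCH 127, optimization level still defaulting to 1); the empty placeholder circuit and the specific partitioner width are illustrative choices, not taken from the diffs.

# Illustrative sketch only -- assumes the registry API as of PATCH 135.
from bqskit.compiler.compile import compile
from bqskit.compiler.machine import MachineModel
from bqskit.compiler.registry import register_workflow
from bqskit.ir import Circuit
from bqskit.ir.gates import CZGate, HGate, RZGate
from bqskit.passes import QuickPartitioner, ScanningGateRemovalPass

# Target gate set and machine, mirroring the fixtures used in the registry tests.
gateset = [CZGate(), HGate(), RZGate()]
model = MachineModel(3, gate_set=gateset)

# Register a custom workflow for optimization level 1 (the default at this point).
register_workflow(model, [QuickPartitioner(2), ScanningGateRemovalPass()], 1)

# A later compile() call whose model matches this gate set at level 1 now runs
# the registered workflow instead of the built-in one.
circuit = Circuit(3)  # placeholder input circuit
out_circuit = compile(circuit, model, optimization_level=1)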
From a74a8aa3ee8f02d2de31b3651b70ebdf2f00f2fd Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 08:45:30 -0700 Subject: [PATCH 136/197] Added is_workflow static function --- bqskit/compiler/workflow.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bqskit/compiler/workflow.py b/bqskit/compiler/workflow.py index 6134d07aa..0771ce6d5 100644 --- a/bqskit/compiler/workflow.py +++ b/bqskit/compiler/workflow.py @@ -87,6 +87,12 @@ def name(self) -> str: """The name of the pass.""" return self._name or self.__class__.__name__ + @staticmethod + def is_workflow(workflow: WorkflowLike) -> bool: + if not is_iterable(workflow): + return isinstance(workflow, BasePass) + return all(isinstance(p, BasePass) for p in workflow) + def __str__(self) -> str: name_seq = f'Workflow: {self.name}\n\t' pass_strs = [ From 6ff5f9ab1f149128899c7adc3716874a73fd0e7b Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 08:45:53 -0700 Subject: [PATCH 137/197] Workflow checking is done in Workflow construction --- bqskit/compiler/registry.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 50cb00523..b21abba7b 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -71,12 +71,6 @@ def register_workflow( workflow = Workflow(workflow) - for p in workflow: - if not isinstance(p, BasePass): - m = 'All elements of `workflow` must be BasePass objects. Got ' - m += f'{type(p)}.' - raise TypeError(m) - global _workflow_registry new_workflow = {optimization_level: workflow} if machine_or_gateset in _workflow_registry: From 3725b2209f809b97ca8176fac330f88608ad97ba Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 08:47:44 -0700 Subject: [PATCH 138/197] No default optimization level --- bqskit/compiler/registry.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index b21abba7b..8b46daa7e 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -34,7 +34,7 @@ def register_workflow( machine_or_gateset: MachineModel | GateSetLike, workflow: WorkflowLike, - optimization_level: int = 1, + optimization_level: int, ) -> None: """ Register a workflow for a given machine model. @@ -50,9 +50,9 @@ def register_workflow( `machine`. If `machine` is already registered, a warning will be logged. - optimization_level (Optional[int]): The optimization level with - which to register the workflow. If no level is provided, the - Workflow will be registered as level 1. (Default: 1) + optimization_level ptional[int): The optimization level with which + to register the workflow. If no level is provided, the Workflow + will be registered as level 1. Raises: TypeError: If `machine_or_gateset` is not a MachineModel or GateSet. 
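Note on PATCH 138 above: removing the default turns optimization_level into a required argument, so call sites that relied on the implicit level must now pass it explicitly. A small before/after sketch (not part of the patch series), using only names already appearing in the surrounding diffs:

# Before PATCH 138: the level implicitly defaulted to 1.
register_workflow(machine_or_gateset, workflow)

# After PATCH 138: the level must be given explicitly.
register_workflow(machine_or_gateset, workflow, 1)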
From 6b7da3c6d336c97e75e6faf16698b7077e9db680 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:01:16 -0700 Subject: [PATCH 139/197] Permutation robust Gateset hash --- bqskit/compiler/gateset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/compiler/gateset.py b/bqskit/compiler/gateset.py index 05f735df5..be50ce692 100644 --- a/bqskit/compiler/gateset.py +++ b/bqskit/compiler/gateset.py @@ -233,7 +233,7 @@ def __repr__(self) -> str: def __hash__(self) -> int: """Hash of the GateSet.""" - return self.__repr__().__hash__() + return hash(tuple(sorted([g.name for g in self._gates]))) GateSetLike = Union[GateSet, Iterable[Gate], Gate] From 99dfee1afca9bcd9b98b12055ab01bf9fbb41b34 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:04:05 -0700 Subject: [PATCH 140/197] Gateset hash test --- tests/compiler/test_gateset.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/compiler/test_gateset.py b/tests/compiler/test_gateset.py index 009003a95..4f89b8fca 100644 --- a/tests/compiler/test_gateset.py +++ b/tests/compiler/test_gateset.py @@ -522,3 +522,16 @@ def test_gate_set_repr() -> None: repr(gate_set) == 'GateSet({CNOTGate, U3Gate})' or repr(gate_set) == 'GateSet({U3Gate, CNOTGate})' ) + + +def test_gate_set_hash() -> None: + gate_set_1 = GateSet({CNOTGate(), U3Gate()}) + gate_set_2 = GateSet({U3Gate(), CNOTGate()}) + gate_set_3 = GateSet({U3Gate(), CNOTGate(), RZGate()}) + + h1 = hash(gate_set_1) + h2 = hash(gate_set_2) + h3 = hash(gate_set_3) + + assert h1 == h2 + assert h1 != h3 From 1e02804fb18f8b2bcaf88fe064497daa7ad3881c Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:12:54 -0700 Subject: [PATCH 141/197] Moved documentation --- bqskit/compiler/registry.py | 44 ++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 8b46daa7e..579d5579f 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -1,23 +1,8 @@ -""" -The _workflow_registry enables MachineModel or GateSet specific workflows to be -registered for used in the `bqskit.compile` method. - -The _workflow_registry maps MachineModels a dictionary of Workflows which -are indexed by optimization level. This object should not be accessed directly -by the user, but instead through the `register_workflow` function. - -Example: - model_t = SpecificMachineModel(num_qudits, radixes) - workflow = [QuickPartitioner(3), NewFangledOptimization()] - register_workflow(model_t, workflow, level) - ... - new_circuit = compile(circuit, model_t, optimization_level=level) -""" +"""Register GateSet or MachineModel specific default workflows.""" from __future__ import annotations -import logging +import warnings -from bqskit.compiler.basepass import BasePass from bqskit.compiler.gateset import GateSet from bqskit.compiler.gateset import GateSetLike from bqskit.compiler.machine import MachineModel @@ -25,8 +10,6 @@ from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.gate import Gate -_logger = logging.getLogger(__name__) - _workflow_registry: dict[MachineModel | GateSet, dict[int, Workflow]] = {} @@ -39,6 +22,12 @@ def register_workflow( """ Register a workflow for a given machine model. + The _workflow_registry enables MachineModel or GateSet specific workflows + to be registered for use in the `bqskit.compile` method. 
_workflow_registry + maps MachineModels a dictionary of Workflows which are indexed by + optimization level. This object should not be accessed directly by the user, + but instead through the `register_workflow` function. + Args: machine_or_gateset (MachineModel | GateSetLike): A MachineModel or GateSetLike to register the workflow under. If a circuit is @@ -54,10 +43,17 @@ def register_workflow( to register the workflow. If no level is provided, the Workflow will be registered as level 1. + Example: + model_t = SpecificMachineModel(num_qudits, radixes) + workflow = [QuickPartitioner(3), NewFangledOptimization()] + register_workflow(model_t, workflow, level) + ... + new_circuit = compile(circuit, model_t, optimization_level=level) + Raises: TypeError: If `machine_or_gateset` is not a MachineModel or GateSet. - TypeError: If `workflow` is not a list of BasePass objects. + Warning: If a workflow for a given optimization_level is overwritten. """ if not isinstance(machine_or_gateset, MachineModel): if isinstance(machine_or_gateset, Gate): @@ -75,9 +71,11 @@ def register_workflow( new_workflow = {optimization_level: workflow} if machine_or_gateset in _workflow_registry: if optimization_level in _workflow_registry[machine_or_gateset]: - m = f'Overwritting workflow for {machine_or_gateset} ' - m += f'at level {optimization_level}.' - _logger.warn(m) + m = f'Overwritting workflow for {machine_or_gateset} at level ' + m += f'{optimization_level}. If multiple Namespace packages are ' + m += 'installed, ensure that their __init__.py files do not ' + m += 'attempt to overwrite the same default Workflows.' + warnings.warn(m) _workflow_registry[machine_or_gateset].update(new_workflow) else: _workflow_registry[machine_or_gateset] = new_workflow From b6219a91ef6f1e2a5a2b89acff9a85c77854cce3 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:26:34 -0700 Subject: [PATCH 142/197] Removed clear_registry function --- bqskit/compiler/registry.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 579d5579f..94ac532be 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -24,7 +24,7 @@ def register_workflow( The _workflow_registry enables MachineModel or GateSet specific workflows to be registered for use in the `bqskit.compile` method. _workflow_registry - maps MachineModels a dictionary of Workflows which are indexed by + maps MachineModels a dictionary of Workflows which are indexed by optimization level. This object should not be accessed directly by the user, but instead through the `register_workflow` function. @@ -79,13 +79,3 @@ def register_workflow( _workflow_registry[machine_or_gateset].update(new_workflow) else: _workflow_registry[machine_or_gateset] = new_workflow - - -def clear_registry() -> None: - """ - Clear the workflow registry. - - This will remove all registered workflows from the registry. 
- """ - global _workflow_registry - _workflow_registry.clear() From 6e31654f3fd378bd236574764f284ca9b4647e21 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:26:55 -0700 Subject: [PATCH 143/197] Removed clear_registry function --- tests/compiler/test_registry.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index ca037bca9..0c557be1c 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -10,7 +10,6 @@ from bqskit.compiler import compile from bqskit.compiler.machine import MachineModel from bqskit.compiler.registry import _workflow_registry -from bqskit.compiler.registry import clear_registry from bqskit.compiler.registry import register_workflow from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike @@ -74,26 +73,30 @@ class TestRegisterWorkflow: @pytest.fixture(autouse=True) def setup(self) -> None: - # _workflow_registry.clear() - clear_registry() + assert _workflow_registry == _workflow_registry + global _workflow_registry + _workflow_registry.clear() def test_register_workflow(self) -> None: + global _workflow_registry assert _workflow_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(), ScanningGateRemovalPass()] - register_workflow(machine, workflow) + register_workflow(machine, workflow, 1) assert machine in _workflow_registry assert 1 in _workflow_registry[machine] assert workflow_match(_workflow_registry[machine][1], workflow) def test_custom_compile_machine(self) -> None: + global _workflow_registry + assert _workflow_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(2)] - register_workflow(machine, workflow) + register_workflow(machine, workflow, 1) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine) assert unitary_match(result, circuit) @@ -103,11 +106,13 @@ def test_custom_compile_machine(self) -> None: assert result.gate_counts == circuit.gate_counts def test_custom_compile_gateset(self) -> None: + global _workflow_registry + assert _workflow_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(2)] - register_workflow(gateset, workflow) + register_workflow(gateset, workflow, 1) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine) assert unitary_match(result, circuit) @@ -117,6 +122,8 @@ def test_custom_compile_gateset(self) -> None: assert result.gate_counts == circuit.gate_counts def test_custom_opt_level(self) -> None: + global _workflow_registry + assert _workflow_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) From 6e010107a73e644d78a40dad2710f6a0690a928f Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:32:04 -0700 Subject: [PATCH 144/197] Changed test --- tests/compiler/test_registry.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index 0c557be1c..cce2eaab6 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -73,7 +73,6 @@ class TestRegisterWorkflow: @pytest.fixture(autouse=True) def setup(self) -> None: - assert 
_workflow_registry == _workflow_registry global _workflow_registry _workflow_registry.clear() From 46972c184fb4574ca8d70745cf021a3cac05ccb3 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 09:35:31 -0700 Subject: [PATCH 145/197] Fixed import global conflict --- tests/compiler/test_registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index cce2eaab6..b5a3d501f 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -73,7 +73,7 @@ class TestRegisterWorkflow: @pytest.fixture(autouse=True) def setup(self) -> None: - global _workflow_registry + # global _workflow_registry _workflow_registry.clear() def test_register_workflow(self) -> None: From 433a10817b6a2a7ff3fd80c05426e92cd3b773c1 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 10:56:45 -0700 Subject: [PATCH 146/197] MachineModels registered in _compile_registry --- bqskit/compiler/registry.py | 55 +++++++++++++------------------------ 1 file changed, 19 insertions(+), 36 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 94ac532be..6097a4e78 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -1,43 +1,38 @@ -"""Register GateSet or MachineModel specific default workflows.""" +"""Register MachineModel specific default workflows.""" from __future__ import annotations import warnings -from bqskit.compiler.gateset import GateSet -from bqskit.compiler.gateset import GateSetLike from bqskit.compiler.machine import MachineModel from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike -from bqskit.ir.gate import Gate -_workflow_registry: dict[MachineModel | GateSet, dict[int, Workflow]] = {} +_compile_registry: dict[MachineModel, dict[int, Workflow]] = {} def register_workflow( - machine_or_gateset: MachineModel | GateSetLike, + key: MachineModel, workflow: WorkflowLike, optimization_level: int, ) -> None: """ - Register a workflow for a given machine model. + Register a workflow for a given MachineModel. - The _workflow_registry enables MachineModel or GateSet specific workflows - to be registered for use in the `bqskit.compile` method. _workflow_registry - maps MachineModels a dictionary of Workflows which are indexed by - optimization level. This object should not be accessed directly by the user, - but instead through the `register_workflow` function. + The _compile_registry enables MachineModel specific workflows to be + registered for use in the `bqskit.compile` method. _compile_registry maps + MachineModels a dictionary of Workflows which are indexed by optimization + level. This object should not be accessed directly by the user, but + instead through the `register_workflow` function. Args: - machine_or_gateset (MachineModel | GateSetLike): A MachineModel or - GateSetLike to register the workflow under. If a circuit is - compiled targeting this machine or gate set, the registered - workflow will be used. + key (MachineModel): A MachineModel to register the workflow under. + If a circuit is compiled targeting this machine or gate set, the + registered workflow will be used. workflow (list[BasePass]): The workflow or list of passes that will be executed if the MachineModel in a call to `compile` matches - `machine`. If `machine` is already registered, a warning will be - logged. + `key`. If `key` is already registered, a warning will be logged. 
optimization_level ptional[int): The optimization level with which to register the workflow. If no level is provided, the Workflow @@ -51,31 +46,19 @@ def register_workflow( new_circuit = compile(circuit, model_t, optimization_level=level) Raises: - TypeError: If `machine_or_gateset` is not a MachineModel or GateSet. - Warning: If a workflow for a given optimization_level is overwritten. """ - if not isinstance(machine_or_gateset, MachineModel): - if isinstance(machine_or_gateset, Gate): - machine_or_gateset = [machine_or_gateset] - if all(isinstance(g, Gate) for g in machine_or_gateset): - machine_or_gateset = GateSet(machine_or_gateset) - else: - m = '`machine_or_gateset` must be a MachineModel or ' - m += f'GateSet, got {type(machine_or_gateset)}.' - raise TypeError(m) - workflow = Workflow(workflow) - global _workflow_registry + global _compile_registry new_workflow = {optimization_level: workflow} - if machine_or_gateset in _workflow_registry: - if optimization_level in _workflow_registry[machine_or_gateset]: - m = f'Overwritting workflow for {machine_or_gateset} at level ' + if key in _compile_registry: + if optimization_level in _compile_registry[key]: + m = f'Overwritting workflow for {key} at level ' m += f'{optimization_level}. If multiple Namespace packages are ' m += 'installed, ensure that their __init__.py files do not ' m += 'attempt to overwrite the same default Workflows.' warnings.warn(m) - _workflow_registry[machine_or_gateset].update(new_workflow) + _compile_registry[key].update(new_workflow) else: - _workflow_registry[machine_or_gateset] = new_workflow + _compile_registry[key] = new_workflow From 74e53a00c95dc62de6cc236ae99b334d413f3142 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 10:57:49 -0700 Subject: [PATCH 147/197] only considers registered MachineModels --- bqskit/compiler/compile.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index c6b9f8bab..4cea33922 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -16,7 +16,7 @@ from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData -from bqskit.compiler.registry import _workflow_registry +from bqskit.compiler.registry import _compile_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit @@ -673,15 +673,9 @@ def build_workflow( # Use a registered workflow if model is found in the registry for a given # optimization_level - for machine_or_gateset in _workflow_registry: - if isinstance(machine_or_gateset, GateSet): - gate_set = machine_or_gateset - else: - gate_set = machine_or_gateset.gate_set - gs_match = gate_set == model.gate_set - ol_found = optimization_level in _workflow_registry[machine_or_gateset] - if gs_match and ol_found: - return _workflow_registry[machine_or_gateset][optimization_level] + if model in _compile_registry: + if optimization_level in _compile_registry[model]: + return _compile_registry[model][optimization_level] if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: From cfa229d3e7d966f659acca5538cea4bb3e302f19 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 11:03:08 -0700 Subject: [PATCH 148/197] Updated tests for _compile_registry --- tests/compiler/test_registry.py | 42 ++++++++++----------------------- 1 file changed, 13 insertions(+), 29 
deletions(-) diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index b5a3d501f..792828e83 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -9,7 +9,7 @@ from bqskit.compiler import compile from bqskit.compiler.machine import MachineModel -from bqskit.compiler.registry import _workflow_registry +from bqskit.compiler.registry import _compile_registry from bqskit.compiler.registry import register_workflow from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike @@ -73,24 +73,24 @@ class TestRegisterWorkflow: @pytest.fixture(autouse=True) def setup(self) -> None: - # global _workflow_registry - _workflow_registry.clear() + # global _compile_registry + _compile_registry.clear() def test_register_workflow(self) -> None: - global _workflow_registry - assert _workflow_registry == {} + global _compile_registry + assert _compile_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(), ScanningGateRemovalPass()] register_workflow(machine, workflow, 1) - assert machine in _workflow_registry - assert 1 in _workflow_registry[machine] - assert workflow_match(_workflow_registry[machine][1], workflow) + assert machine in _compile_registry + assert 1 in _compile_registry[machine] + assert workflow_match(_compile_registry[machine][1], workflow) def test_custom_compile_machine(self) -> None: - global _workflow_registry - assert _workflow_registry == {} + global _compile_registry + assert _compile_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) @@ -104,30 +104,14 @@ def test_custom_compile_machine(self) -> None: result.unfold_all() assert result.gate_counts == circuit.gate_counts - def test_custom_compile_gateset(self) -> None: - global _workflow_registry - assert _workflow_registry == {} - gateset = [CZGate(), HGate(), RZGate()] - num_qudits = 3 - machine = MachineModel(num_qudits, gate_set=gateset) - workflow = [QuickPartitioner(2)] - register_workflow(gateset, workflow, 1) - circuit = simple_circuit(num_qudits, gateset) - result = compile(circuit, machine) - assert unitary_match(result, circuit) - assert result.num_operations > 0 - assert result.gate_counts != circuit.gate_counts - result.unfold_all() - assert result.gate_counts == circuit.gate_counts - def test_custom_opt_level(self) -> None: - global _workflow_registry - assert _workflow_registry == {} + global _compile_registry + assert _compile_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QSearchSynthesisPass()] - register_workflow(gateset, workflow, 2) + register_workflow(machine, workflow, 2) circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine, optimization_level=2) assert unitary_match(result, circuit) From 47fa2ed2e4de08c0b597fb001fd069d199f5aa36 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 11:04:39 -0700 Subject: [PATCH 149/197] Removed unused import --- bqskit/compiler/compile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 4cea33922..cac057bd3 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -13,7 +13,6 @@ import numpy as np from bqskit.compiler.compiler import Compiler -from bqskit.compiler.gateset import GateSet from bqskit.compiler.machine 
import MachineModel from bqskit.compiler.passdata import PassData from bqskit.compiler.registry import _compile_registry From bae9fee7d3809f6c850bbefd0d1efdb77fc56b1e Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 11:05:22 -0700 Subject: [PATCH 150/197] pre-commit --- bqskit/compiler/registry.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index 6097a4e78..b3cb1b4d2 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -19,14 +19,14 @@ def register_workflow( """ Register a workflow for a given MachineModel. - The _compile_registry enables MachineModel specific workflows to be + The _compile_registry enables MachineModel specific workflows to be registered for use in the `bqskit.compile` method. _compile_registry maps MachineModels a dictionary of Workflows which are indexed by optimization - level. This object should not be accessed directly by the user, but + level. This object should not be accessed directly by the user, but instead through the `register_workflow` function. Args: - key (MachineModel): A MachineModel to register the workflow under. + key (MachineModel): A MachineModel to register the workflow under. If a circuit is compiled targeting this machine or gate set, the registered workflow will be used. From 1f94658cfaf620b2a2eb394e08c4f1500f6970bb Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 11:11:26 -0700 Subject: [PATCH 151/197] Fixed imports --- tests/compiler/test_registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index 792828e83..6371211c9 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -7,7 +7,7 @@ import pytest from numpy import allclose -from bqskit.compiler import compile +from bqskit.compiler.compile import compile from bqskit.compiler.machine import MachineModel from bqskit.compiler.registry import _compile_registry from bqskit.compiler.registry import register_workflow From 746219b73cd526f9fb1ea2895e8628d51f5aee07 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 11:28:31 -0700 Subject: [PATCH 152/197] Documentation --- bqskit/passes/search/generators/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bqskit/passes/search/generators/__init__.py b/bqskit/passes/search/generators/__init__.py index 2453b7c84..dae5ac695 100644 --- a/bqskit/passes/search/generators/__init__.py +++ b/bqskit/passes/search/generators/__init__.py @@ -1,6 +1,7 @@ """This package contains LayerGenerator definitions.""" from __future__ import annotations +from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator from bqskit.passes.search.generators.fourparam import FourParamGenerator from bqskit.passes.search.generators.middleout import MiddleOutLayerGenerator from bqskit.passes.search.generators.seed import SeedLayerGenerator @@ -8,9 +9,9 @@ from bqskit.passes.search.generators.single import SingleQuditLayerGenerator from bqskit.passes.search.generators.stair import StairLayerGenerator from bqskit.passes.search.generators.wide import WideLayerGenerator -from bqskit.passes.search.generators.discrete import DiscreteLayerGenerator __all__ = [ + 'DiscreteLayerGenerator', 'FourParamGenerator', 'MiddleOutLayerGenerator', 'SeedLayerGenerator', From 70059ffad7fa65b28f78bcbdcfc46c868cedbde1 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 
2024 11:29:06 -0700 Subject: [PATCH 153/197] Reodered imports --- bqskit/passes/synthesis/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/passes/synthesis/__init__.py b/bqskit/passes/synthesis/__init__.py index 6ebe9d814..eea097231 100644 --- a/bqskit/passes/synthesis/__init__.py +++ b/bqskit/passes/synthesis/__init__.py @@ -1,6 +1,7 @@ """This package implements synthesis passes and synthesis related classes.""" from __future__ import annotations +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -8,7 +9,6 @@ from bqskit.passes.synthesis.qsearch import QSearchSynthesisPass from bqskit.passes.synthesis.synthesis import SynthesisPass from bqskit.passes.synthesis.target import SetTargetPass -from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass __all__ = [ 'LEAPSynthesisPass', From ef862cdd07bcf3f224f4e15fb0a08ce747186c04 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 28 Aug 2024 16:37:38 -0700 Subject: [PATCH 154/197] Switched np.sqrt to math.sqrt for documentation generation purposes --- bqskit/ir/gates/constant/h.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/bqskit/ir/gates/constant/h.py b/bqskit/ir/gates/constant/h.py index 633db3be2..ec760fe6f 100644 --- a/bqskit/ir/gates/constant/h.py +++ b/bqskit/ir/gates/constant/h.py @@ -1,7 +1,13 @@ """This module implements the HGate.""" from __future__ import annotations -import numpy as np +from math import sqrt + +from numpy import array +from numpy import complex128 +from numpy import exp +from numpy import pi +from numpy import zeros from bqskit.ir.gates.constantgate import ConstantGate from bqskit.ir.gates.quditgate import QuditGate @@ -67,22 +73,22 @@ def __init__(self, radix: int = 2) -> None: # Calculate unitary if radix == 2: - matrix = np.array( + matrix = array( [ - [np.sqrt(2) / 2, np.sqrt(2) / 2], - [np.sqrt(2) / 2, -np.sqrt(2) / 2], + [sqrt(2) / 2, sqrt(2) / 2], + [sqrt(2) / 2, -sqrt(2) / 2], ], - dtype=np.complex128, + dtype=complex128, ) self._utry = UnitaryMatrix(matrix) else: - matrix = np.zeros([radix] * 2, dtype=np.complex128) - omega = np.exp(2j * np.pi / radix) + matrix = zeros([radix] * 2, dtype=complex128) + omega = exp(2j * pi / radix) for i in range(radix): for j in range(i, radix): val = omega ** (i * j) matrix[i, j] = val matrix[j, i] = val - matrix *= 1 / np.sqrt(radix) + matrix *= 1 / sqrt(radix) self._utry = UnitaryMatrix(matrix, self.radixes) From c4c2df58a6392659c31be2144e0a91e99c14f5fd Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 30 Aug 2024 11:49:38 -0400 Subject: [PATCH 155/197] Import pi from math --- bqskit/ir/gates/constant/h.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/ir/gates/constant/h.py b/bqskit/ir/gates/constant/h.py index ec760fe6f..d53c68cb0 100644 --- a/bqskit/ir/gates/constant/h.py +++ b/bqskit/ir/gates/constant/h.py @@ -2,11 +2,11 @@ from __future__ import annotations from math import sqrt +from math import pi from numpy import array from numpy import complex128 from numpy import exp -from numpy import pi from numpy import zeros from bqskit.ir.gates.constantgate import ConstantGate From 91339229a31132ba9902d3bd12520cc2c01abff3 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 30 Aug 2024 11:51:01 -0400 Subject: [PATCH 
156/197] Precise imports --- bqskit/passes/search/generators/discrete.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 61abb12d4..c8654557b 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -7,9 +7,9 @@ from bqskit.compiler.passdata import PassData from bqskit.ir.circuit import Circuit from bqskit.ir.gate import Gate -from bqskit.ir.gates import CNOTGate -from bqskit.ir.gates import HGate -from bqskit.ir.gates import TGate +from bqskit.ir.gates.constant.cx import CNOTGate +from bqskit.ir.gates.constant.h import HGate +from bqskit.ir.gates.constant.t import TGate from bqskit.ir.gates.parameterized.pauliz import PauliZGate from bqskit.ir.operation import Operation from bqskit.passes.search.generator import LayerGenerator @@ -40,7 +40,7 @@ def __init__( Args: gateset (Sequence[Gate]): A sequence of gates that can be used in the output circuit. These must be non-parameterized gates. - (Default: [HGate(), TGate(), CNOTGate()]) + (Default: [HGate, TGate, CNOTGate]) double_headed (bool): If True, successors will be generated by both appending and prepending gates. (Default: False) From 1d41441ce449a67875015468e32ad1ba4aeb0a8a Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Fri, 30 Aug 2024 11:51:49 -0400 Subject: [PATCH 157/197] Reordered imports --- bqskit/ir/gates/constant/h.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/ir/gates/constant/h.py b/bqskit/ir/gates/constant/h.py index d53c68cb0..34fafa34d 100644 --- a/bqskit/ir/gates/constant/h.py +++ b/bqskit/ir/gates/constant/h.py @@ -1,8 +1,8 @@ """This module implements the HGate.""" from __future__ import annotations -from math import sqrt from math import pi +from math import sqrt from numpy import array from numpy import complex128 From 60982b754ce426312978ce9f04ec10aea40d6429 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 13:36:28 -0400 Subject: [PATCH 158/197] Protect runtime outgoing threads better --- bqskit/runtime/base.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bqskit/runtime/base.py b/bqskit/runtime/base.py index d10996e49..bcc9f03fc 100644 --- a/bqskit/runtime/base.py +++ b/bqskit/runtime/base.py @@ -410,7 +410,13 @@ def send_outgoing(self) -> None: if outgoing[0].closed: continue - outgoing[0].send((outgoing[1], outgoing[2])) + try: + outgoing[0].send((outgoing[1], outgoing[2])) + except (EOFError, ConnectionResetError): + self.handle_disconnect(outgoing[0]) + _logger.warning('Connection reset while sending message.') + continue + if _logger.isEnabledFor(logging.DEBUG): to = self.get_to_string(outgoing[0]) _logger.debug(f'Sent message {outgoing[1].name} to {to}.') From 41c6ff03c8221ba526fd0e6136321e23974a9cd2 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 14:07:03 -0400 Subject: [PATCH 159/197] Removed small memory leak --- bqskit/runtime/detached.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 8740c7170..7ef5e8a75 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -256,8 +256,19 @@ def handle_disconnect(self, conn: Connection) -> None: """Disconnect a client connection from the runtime.""" super().handle_disconnect(conn) tasks = self.clients.pop(conn) + for task_id in tasks: self.handle_cancel_comp_task(task_id) + + tasks_to_pop = [] + for 
(task, (tid, other_conn)) in self.tasks.items(): + if other_conn == conn: + tasks_to_pop.append((task_id, tid)) + + for task_id, tid in tasks_to_pop: + self.tasks.pop(task_id) + self.mailbox_to_task_dict.pop(tid) + _logger.info('Unregistered client.') def handle_new_comp_task( @@ -386,6 +397,9 @@ def handle_error(self, error_payload: tuple[int, str]) -> None: raise RuntimeError(error_payload) tid = error_payload[0] + if tid not in self.mailbox_to_task_dict: + return # Silently discard errors from cancelled tasks + conn = self.tasks[self.mailbox_to_task_dict[tid]][1] self.outgoing.put((conn, RuntimeMessage.ERROR, error_payload[1])) # TODO: Broadcast cancel to all tasks with compilation task id tid @@ -398,6 +412,9 @@ def handle_error(self, error_payload: tuple[int, str]) -> None: def handle_log(self, log_payload: tuple[int, LogRecord]) -> None: """Forward logs to appropriate client.""" tid = log_payload[0] + if tid not in self.mailbox_to_task_dict: + return # Silently discard logs from cancelled tasks + conn = self.tasks[self.mailbox_to_task_dict[tid]][1] self.outgoing.put((conn, RuntimeMessage.LOG, log_payload[1])) From ae093679afd226e3d4e224dfc2de4c691a587647 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 16:23:29 -0400 Subject: [PATCH 160/197] Allow nonserializable logs through runtime --- bqskit/compiler/compiler.py | 14 ++++++++--- bqskit/runtime/detached.py | 2 +- bqskit/runtime/worker.py | 11 +++++++- tests/runtime/test_logging.py | 47 +++++++++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+), 5 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 8f58f3c35..58452b91d 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -4,6 +4,7 @@ import atexit import functools import logging +import pickle import signal import subprocess import sys @@ -439,9 +440,16 @@ def _recv_handle_log_error(self) -> tuple[RuntimeMessage, Any]: msg, payload = self.conn.recv() if msg == RuntimeMessage.LOG: - logger = logging.getLogger(payload.name) - if logger.isEnabledFor(payload.levelno): - logger.handle(payload) + record = pickle.loads(payload) + if isinstance(record, logging.LogRecord): + logger = logging.getLogger(record.name) + if logger.isEnabledFor(record.levelno): + logger.handle(record) + else: + name, levelno, msg = record + logger = logging.getLogger(name) + logger.log(levelno, msg) + elif msg == RuntimeMessage.ERROR: raise RuntimeError(payload) diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index 7ef5e8a75..a67768734 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -409,7 +409,7 @@ def handle_error(self, error_payload: tuple[int, str]) -> None: # still cancel here incase the client catches the error and # resubmits a job. 
- def handle_log(self, log_payload: tuple[int, LogRecord]) -> None: + def handle_log(self, log_payload: tuple[int, bytes]) -> None: """Forward logs to appropriate client.""" tid = log_payload[0] if tid not in self.mailbox_to_task_dict: diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index 1684f7dbc..fd6b2030e 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -3,6 +3,7 @@ import argparse import logging +import pickle import os import signal import sys @@ -225,7 +226,15 @@ def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord: record.msg += con_str record.msg += ']' tid = active_task.comp_task_id - self._conn.send((RuntimeMessage.LOG, (tid, record))) + try: + serial = pickle.dumps(record) + except (pickle.PicklingError, TypeError): + serial = pickle.dumps(( + record.name, + record.levelno, + record.getMessage(), + )) + self._conn.send((RuntimeMessage.LOG, (tid, serial))) return record logging.setLogRecordFactory(record_factory) diff --git a/tests/runtime/test_logging.py b/tests/runtime/test_logging.py index bee4bfb0a..ad5778f8e 100644 --- a/tests/runtime/test_logging.py +++ b/tests/runtime/test_logging.py @@ -2,6 +2,7 @@ from __future__ import annotations import logging +import pickle from io import StringIO import pytest @@ -142,6 +143,52 @@ def test_using_external_logging(server_compiler: Compiler) -> None: logger.removeHandler(handler) logger.setLevel(logging.WARNING) +class ExternalWithArgsPass(BasePass): + async def run(self, circuit, pass_data): + logging.getLogger("dummy2").debug('int %d', 1) + +def test_external_logging_with_args(server_compiler: Compiler) -> None: + logger = logging.getLogger('dummy2') + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler(StringIO()) + handler.setLevel(logging.DEBUG) + logger.addHandler(handler) + server_compiler.compile(Circuit(1), [ExternalWithArgsPass()]) + log = handler.stream.getvalue() + assert 'int 1' in log + logger.removeHandler(handler) + logger.setLevel(logging.WARNING) + + +class NonSerializable: + def __reduce__(self): + raise pickle.PicklingError("This class is not serializable") + def __str__(self): + return "NonSerializable" + + +class ExternalWithNonSerializableArgsPass(BasePass): + async def run(self, circuit, pass_data): + logging.getLogger("dummy2").debug( + 'NonSerializable %s', + NonSerializable() + ) + + +def test_external_logging_with_nonserializable_args( + server_compiler: Compiler +) -> None: + logger = logging.getLogger('dummy2') + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler(StringIO()) + handler.setLevel(logging.DEBUG) + logger.addHandler(handler) + server_compiler.compile(Circuit(1), [ExternalWithNonSerializableArgsPass()]) + log = handler.stream.getvalue() + assert 'NonSerializable NonSerializable' in log + logger.removeHandler(handler) + logger.setLevel(logging.WARNING) + @pytest.mark.parametrize('level', [-1, 0, 1, 2, 3, 4]) def test_limiting_nested_calls_enable_logging( From e3d557e18b57b758befe5922907a176c5a165b1b Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 16:30:19 -0400 Subject: [PATCH 161/197] Added layer info to successful log on synthesis --- bqskit/passes/synthesis/leap.py | 6 ++++-- bqskit/passes/synthesis/qfast.py | 2 +- bqskit/passes/synthesis/qsearch.py | 8 +++++--- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/bqskit/passes/synthesis/leap.py b/bqskit/passes/synthesis/leap.py index da7475fae..30ec70487 100644 --- a/bqskit/passes/synthesis/leap.py +++ b/bqskit/passes/synthesis/leap.py 
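Patch 160 above splits log forwarding into a worker-side serializer with a fallback and a client-side deserializer that accepts either form. Condensed into two standalone helpers, with names that are illustrative rather than BQSKit API:

import logging
import pickle

def serialize_log(record: logging.LogRecord) -> bytes:
    # Worker side: ship the full record when it pickles, otherwise fall back
    # to a (name, levelno, formatted message) triple, as in record_factory.
    try:
        return pickle.dumps(record)
    except (pickle.PicklingError, TypeError):
        return pickle.dumps((record.name, record.levelno, record.getMessage()))

def emit_log(payload: bytes) -> None:
    # Client side: mirror _recv_handle_log_error's handling of both forms.
    obj = pickle.loads(payload)
    if isinstance(obj, logging.LogRecord):
        logger = logging.getLogger(obj.name)
        if logger.isEnabledFor(obj.levelno):
            logger.handle(obj)
    else:
        name, levelno, msg = obj
        logging.getLogger(name).log(levelno, msg)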
@@ -196,7 +196,7 @@ async def synthesize( # Evalute initial layer if best_dist < self.success_threshold: - _logger.debug('Successful synthesis.') + _logger.debug('Successful synthesis with 0 layers.') return initial_layer # Main loop @@ -222,7 +222,9 @@ async def synthesize( dist = self.cost.calc_cost(circuit, utry) if dist < self.success_threshold: - _logger.debug('Successful synthesis.') + _logger.debug( + f'Successful synthesis with {layer + 1} layers.' + ) if self.store_partial_solutions: data['psols'] = psols return circuit diff --git a/bqskit/passes/synthesis/qfast.py b/bqskit/passes/synthesis/qfast.py index 9a72bd79c..e4e036fb1 100644 --- a/bqskit/passes/synthesis/qfast.py +++ b/bqskit/passes/synthesis/qfast.py @@ -164,7 +164,7 @@ async def synthesize( if dist < self.success_threshold: self.finalize(circuit, utry, instantiate_options) - _logger.info('Successful synthesis.') + _logger.info(f'Successful synthesis with {depth} layers.') return circuit # Expand or restrict head diff --git a/bqskit/passes/synthesis/qsearch.py b/bqskit/passes/synthesis/qsearch.py index 13276ad82..c657bc702 100644 --- a/bqskit/passes/synthesis/qsearch.py +++ b/bqskit/passes/synthesis/qsearch.py @@ -171,7 +171,7 @@ async def synthesize( # Evalute initial layer if best_dist < self.success_threshold: - _logger.debug('Successful synthesis.') + _logger.debug('Successful synthesis with 0 layers.') return initial_layer # Main loop @@ -197,7 +197,9 @@ async def synthesize( dist = self.cost.calc_cost(circuit, utry) if dist < self.success_threshold: - _logger.debug('Successful synthesis.') + _logger.debug( + f'Successful synthesis with {layer + 1} layers.' + ) if self.store_partial_solutions: data['psols'] = psols return circuit @@ -210,7 +212,7 @@ async def synthesize( ) best_dist = dist best_circ = circuit - best_layer = layer + best_layer = layer + 1 if self.store_partial_solutions: if layer not in psols: From 8cb016551087605b4456187a1a2dbf5f8f7dbe3a Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 16:33:04 -0400 Subject: [PATCH 162/197] pre-commit --- bqskit/compiler/compiler.py | 1 - bqskit/passes/synthesis/leap.py | 2 +- bqskit/passes/synthesis/qsearch.py | 2 +- bqskit/runtime/detached.py | 3 +-- bqskit/runtime/worker.py | 2 +- tests/runtime/test_logging.py | 24 ++++++++++++++---------- 6 files changed, 18 insertions(+), 16 deletions(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 58452b91d..1a94ef9bb 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -450,7 +450,6 @@ def _recv_handle_log_error(self) -> tuple[RuntimeMessage, Any]: logger = logging.getLogger(name) logger.log(levelno, msg) - elif msg == RuntimeMessage.ERROR: raise RuntimeError(payload) diff --git a/bqskit/passes/synthesis/leap.py b/bqskit/passes/synthesis/leap.py index 30ec70487..f05300eef 100644 --- a/bqskit/passes/synthesis/leap.py +++ b/bqskit/passes/synthesis/leap.py @@ -223,7 +223,7 @@ async def synthesize( if dist < self.success_threshold: _logger.debug( - f'Successful synthesis with {layer + 1} layers.' + f'Successful synthesis with {layer + 1} layers.', ) if self.store_partial_solutions: data['psols'] = psols diff --git a/bqskit/passes/synthesis/qsearch.py b/bqskit/passes/synthesis/qsearch.py index c657bc702..9cad4fc44 100644 --- a/bqskit/passes/synthesis/qsearch.py +++ b/bqskit/passes/synthesis/qsearch.py @@ -198,7 +198,7 @@ async def synthesize( if dist < self.success_threshold: _logger.debug( - f'Successful synthesis with {layer + 1} layers.' 
+ f'Successful synthesis with {layer + 1} layers.', ) if self.store_partial_solutions: data['psols'] = psols diff --git a/bqskit/runtime/detached.py b/bqskit/runtime/detached.py index a67768734..ea32afbd6 100644 --- a/bqskit/runtime/detached.py +++ b/bqskit/runtime/detached.py @@ -8,7 +8,6 @@ import time import uuid from dataclasses import dataclass -from logging import LogRecord from multiprocessing.connection import Connection from multiprocessing.connection import Listener from threading import Thread @@ -263,7 +262,7 @@ def handle_disconnect(self, conn: Connection) -> None: tasks_to_pop = [] for (task, (tid, other_conn)) in self.tasks.items(): if other_conn == conn: - tasks_to_pop.append((task_id, tid)) + tasks_to_pop.append((task_id, tid)) for task_id, tid in tasks_to_pop: self.tasks.pop(task_id) diff --git a/bqskit/runtime/worker.py b/bqskit/runtime/worker.py index fd6b2030e..e61b13009 100644 --- a/bqskit/runtime/worker.py +++ b/bqskit/runtime/worker.py @@ -3,8 +3,8 @@ import argparse import logging -import pickle import os +import pickle import signal import sys import time diff --git a/tests/runtime/test_logging.py b/tests/runtime/test_logging.py index ad5778f8e..4c8694439 100644 --- a/tests/runtime/test_logging.py +++ b/tests/runtime/test_logging.py @@ -4,6 +4,7 @@ import logging import pickle from io import StringIO +from typing import Any import pytest @@ -143,9 +144,11 @@ def test_using_external_logging(server_compiler: Compiler) -> None: logger.removeHandler(handler) logger.setLevel(logging.WARNING) + class ExternalWithArgsPass(BasePass): - async def run(self, circuit, pass_data): - logging.getLogger("dummy2").debug('int %d', 1) + async def run(self, circuit: Circuit, data: PassData) -> None: + logging.getLogger('dummy2').debug('int %d', 1) + def test_external_logging_with_args(server_compiler: Compiler) -> None: logger = logging.getLogger('dummy2') @@ -161,22 +164,23 @@ def test_external_logging_with_args(server_compiler: Compiler) -> None: class NonSerializable: - def __reduce__(self): - raise pickle.PicklingError("This class is not serializable") - def __str__(self): - return "NonSerializable" + def __reduce__(self) -> str | tuple[Any, ...]: + raise pickle.PicklingError('This class is not serializable') + + def __str__(self) -> str: + return 'NonSerializable' class ExternalWithNonSerializableArgsPass(BasePass): - async def run(self, circuit, pass_data): - logging.getLogger("dummy2").debug( + async def run(self, circuit: Circuit, data: PassData) -> None: + logging.getLogger('dummy2').debug( 'NonSerializable %s', - NonSerializable() + NonSerializable(), ) def test_external_logging_with_nonserializable_args( - server_compiler: Compiler + server_compiler: Compiler, ) -> None: logger = logging.getLogger('dummy2') logger.setLevel(logging.DEBUG) From 0b557fb4388986a706a6d99806f8fe836bc40a90 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 16:35:18 -0400 Subject: [PATCH 163/197] Fixed pass list docs --- bqskit/passes/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index 9a386f6fb..3a9882ee4 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -138,6 +138,10 @@ These passes either perform upper-bound error analysis of the PAM process. +.. 
autosummary:: + :toctree: autogen + :recursive: + TagPAMBlockDataPass CalculatePAMErrorsPass UnTagPAMBlockDataPass From 38e52364b5923f9c21c0f9fc94a35045fcfa3c5a Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 16:46:10 -0400 Subject: [PATCH 164/197] Attempt to fix #245 --- docs/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index f48262b93..ccb995325 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -75,8 +75,8 @@ # 'IntervalLike': 'bqskit.ir.IntervalLike', # } # napoleon_type_aliases = autodoc_type_aliases -autodoc_typehints = 'description' -autodoc_typehints_description_target = 'documented' +autodoc_typehints = 'both' +autodoc_typehints_description_target = 'all' autoclass_content = 'class' nbsphinx_output_prompt = 'Out[%s]:' From d1b9c38a48f652f760c19f0612b4a5845bafc833 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 17:02:05 -0400 Subject: [PATCH 165/197] Second attempt --- docs/conf.py | 5 +++-- docs/requirements.txt | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index ccb995325..45036da82 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,6 +38,7 @@ 'myst_parser', 'jupyter_sphinx', 'nbsphinx', + 'sphinx_autodoc_typehints', ] # Add any paths that contain templates here, relative to this directory. @@ -75,8 +76,8 @@ # 'IntervalLike': 'bqskit.ir.IntervalLike', # } # napoleon_type_aliases = autodoc_type_aliases -autodoc_typehints = 'both' -autodoc_typehints_description_target = 'all' +autodoc_typehints = 'description' +autodoc_typehints_description_target = 'documented' autoclass_content = 'class' nbsphinx_output_prompt = 'Out[%s]:' diff --git a/docs/requirements.txt b/docs/requirements.txt index 45bb175e7..a88d4eaa7 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,6 +2,7 @@ Sphinx>=4.5.0 sphinx-autodoc-typehints>=1.12.0 sphinx-rtd-theme>=1.0.0 sphinx-togglebutton>=0.2.3 +sphinx-autodoc-typehints>=2.3.0 sphinxcontrib-applehelp>=1.0.2 sphinxcontrib-devhelp>=1.0.2 sphinxcontrib-htmlhelp>=2.0.0 From de21e49a0b61f53902f2237dd796edc99c13155f Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 17:09:29 -0400 Subject: [PATCH 166/197] Manual fix --- bqskit/compiler/compiler.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 1a94ef9bb..6127c3745 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -313,7 +313,15 @@ def submit( return task.task_id def status(self, task_id: uuid.UUID) -> CompilationStatus: - """Retrieve the status of the specified task.""" + """ + Retrieve the status of the specified task. + + Args: + task_id (uuid.UUID): The ID of the task to check. + + Returns: + CompilationStatus: The status of the task. 
+ """ msg, payload = self._send_recv(RuntimeMessage.STATUS, task_id) if msg != RuntimeMessage.STATUS: raise RuntimeError(f'Unexpected message type: {msg}.') From f061beff9ec6270047bb40ddb9d0fc458b68d27f Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sat, 31 Aug 2024 17:14:10 -0400 Subject: [PATCH 167/197] Fixed DiagonalSynthesis Doc Issue --- bqskit/passes/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bqskit/passes/__init__.py b/bqskit/passes/__init__.py index 3a9882ee4..389d42ec5 100644 --- a/bqskit/passes/__init__.py +++ b/bqskit/passes/__init__.py @@ -28,7 +28,7 @@ :toctree: autogen :recursive: - DiagonalSynthesisPass + WalshDiagonalSynthesisPass LEAPSynthesisPass QSearchSynthesisPass QFASTDecompositionPass @@ -289,6 +289,7 @@ from bqskit.passes.search.heuristics.astar import AStarHeuristic from bqskit.passes.search.heuristics.dijkstra import DijkstraHeuristic from bqskit.passes.search.heuristics.greedy import GreedyHeuristic +from bqskit.passes.synthesis.diagonal import WalshDiagonalSynthesisPass from bqskit.passes.synthesis.leap import LEAPSynthesisPass from bqskit.passes.synthesis.pas import PermutationAwareSynthesisPass from bqskit.passes.synthesis.qfast import QFASTDecompositionPass @@ -326,7 +327,7 @@ 'ScanPartitioner', 'QuickPartitioner', 'SynthesisPass', - 'DiagonalSynthesisPass', + 'WalshDiagonalSynthesisPass', 'LEAPSynthesisPass', 'QSearchSynthesisPass', 'QFASTDecompositionPass', From 68bc1f3def57f447019c06be77d2f3fab703d2c2 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 3 Sep 2024 11:08:23 -0400 Subject: [PATCH 168/197] TypeError changed to ValueError --- bqskit/passes/search/generators/discrete.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index c8654557b..290a571d9 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -46,7 +46,7 @@ def __init__( both appending and prepending gates. (Default: False) Raises: - TypeError: If the gateset is not a sequence. + ValueError: If the gateset is not a sequence. TypeError: If the gateset contains a parameterized gate. @@ -54,7 +54,7 @@ def __init__( """ if not is_sequence(gateset): m = f'Expected sequence of gates, got {type(gateset)}.' - raise TypeError(m) + raise ValueError(m) radix = gateset[0].radixes[0] for gate in gateset: From 6a4d702e4a5e3e429006af9e9b8c626f71d7bb56 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 3 Sep 2024 11:30:20 -0400 Subject: [PATCH 169/197] TypeError changed to ValueError --- bqskit/passes/search/generators/discrete.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 290a571d9..cbbb8b445 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -50,7 +50,7 @@ def __init__( TypeError: If the gateset contains a parameterized gate. - TypeError: If the radices of gates are different. + ValueError: If the radices of gates are different. """ if not is_sequence(gateset): m = f'Expected sequence of gates, got {type(gateset)}.' @@ -66,7 +66,7 @@ def __init__( if rad != radix: m = f'Radix mismatch on gate: {gate}. ' m += f'Expected {radix}, got {rad}.' 
- raise TypeError(m) + raise ValueError(m) self.gateset = gateset self.double_headed = double_headed From 7920900d83a6e966b9fa6bd188eb75f99cbbde7b Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 3 Sep 2024 11:53:38 -0400 Subject: [PATCH 170/197] TypeError changed to ValueError --- bqskit/passes/search/generators/discrete.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index cbbb8b445..906048cb9 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -43,12 +43,13 @@ def __init__( (Default: [HGate, TGate, CNOTGate]) double_headed (bool): If True, successors will be generated by - both appending and prepending gates. (Default: False) + both appending and prepending gates. This lets unitaries be + diagonalized instead of inverted. (Default: False) Raises: ValueError: If the gateset is not a sequence. - TypeError: If the gateset contains a parameterized gate. + ValueError: If the gateset contains a parameterized gate. ValueError: If the radices of gates are different. """ @@ -61,7 +62,7 @@ def __init__( if gate.num_params > 0: m = 'Expected gate for constant gates, got parameterized' m += f' {gate} gate.' - raise TypeError(m) + raise ValueError(m) for rad in gate.radixes: if rad != radix: m = f'Radix mismatch on gate: {gate}. ' From 69879c353e2232c2bc4974927c1b8098733a96a8 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 3 Sep 2024 13:14:59 -0400 Subject: [PATCH 171/197] Multi-radix support added --- bqskit/passes/search/generators/discrete.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 906048cb9..daeffe374 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -168,6 +168,8 @@ def add_to_successors(circuit: Circuit) -> None: for gate in singles: for qudit in range(circuit.num_qudits): + if gate.radixes[0] != circuit.radixes[qudit]: + continue if self.cancels_something(circuit, gate, (qudit,)): continue if isinstance(gate, TGate): @@ -188,6 +190,9 @@ def add_to_successors(circuit: Circuit) -> None: for edge in coupling_graph: if self.cancels_something(circuit, gate, edge): continue + qudit_radixes = [circuit.radixes[q] for q in edge] + if gate.radixes != qudit_radixes: + continue successor = circuit.copy() successor.append_gate(gate, edge) add_to_successors(successor) From 6bc2b13d4b7a0b22c1f29bafd509144d0f8bb767 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Tue, 3 Sep 2024 15:40:57 -0400 Subject: [PATCH 172/197] Register workflows by target type --- bqskit/compiler/compile.py | 31 ++++++++++++---- bqskit/compiler/registry.py | 40 +++++++++++++++++---- tests/compiler/test_registry.py | 63 +++++++++++++++++++++++++-------- 3 files changed, 106 insertions(+), 28 deletions(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 83c3d8880..61993c1e7 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -14,7 +14,10 @@ from bqskit.compiler.compiler import Compiler from bqskit.compiler.machine import MachineModel from bqskit.compiler.passdata import PassData -from bqskit.compiler.registry import _compile_registry +from bqskit.compiler.registry import _compile_circuit_registry +from bqskit.compiler.registry import _compile_statemap_registry +from bqskit.compiler.registry import _compile_stateprep_registry 
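The multi-radix support added to the discrete layer generator in patch 171 above filters candidate placements by comparing a gate's radixes against the circuit's qudit radixes. That check, pulled out as a hedged standalone helper for illustration only:

from typing import Sequence

def radix_compatible(
    gate_radixes: Sequence[int],
    circuit_radixes: Sequence[int],
    location: Sequence[int],
) -> bool:
    # A gate may only be placed on qudits whose radixes match its own,
    # e.g. a qubit-only gate is skipped on qutrit wires.
    return all(
        gate_radixes[i] == circuit_radixes[q]
        for i, q in enumerate(location)
    )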
+from bqskit.compiler.registry import _compile_unitary_registry from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit @@ -669,12 +672,6 @@ def build_workflow( if model is None: model = MachineModel(input.num_qudits, radixes=input.radixes) - # Use a registered workflow if model is found in the registry for a given - # optimization_level - if model in _compile_registry: - if optimization_level in _compile_registry[model]: - return _compile_registry[model][optimization_level] - if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: if any( @@ -691,6 +688,11 @@ def build_workflow( 'Unable to compile circuit with gate larger than' ' max_synthesis_size.\nConsider adjusting it.', ) + # Use a registered workflow if model is found in the circuit registry + # for a given optimization_level + if model in _compile_circuit_registry: + if optimization_level in _compile_circuit_registry[model]: + return _compile_circuit_registry[model][optimization_level] return _circuit_workflow( model, @@ -708,6 +710,11 @@ def build_workflow( 'Unable to compile unitary with size larger than' ' max_synthesis_size.\nConsider adjusting it.', ) + # Use a registered workflow if model is found in the unitary registry + # for a given optimization_level + if model in _compile_unitary_registry: + if optimization_level in _compile_unitary_registry[model]: + return _compile_unitary_registry[model][optimization_level] return _synthesis_workflow( input, @@ -726,6 +733,11 @@ def build_workflow( 'Unable to compile states with size larger than' ' max_synthesis_size.\nConsider adjusting it.', ) + # Use a registered workflow if model is found in the stateprep registry + # for a given optimization_level + if model in _compile_stateprep_registry: + if optimization_level in _compile_stateprep_registry[model]: + return _compile_stateprep_registry[model][optimization_level] return _stateprep_workflow( input, @@ -744,6 +756,11 @@ def build_workflow( 'Unable to compile state systems with size larger than' ' max_synthesis_size.\nConsider adjusting it.', ) + # Use a registered workflow if model is found in the statemap registry + # for a given optimization_level + if model in _compile_statemap_registry: + if optimization_level in _compile_statemap_registry[model]: + return _compile_statemap_registry[model][optimization_level] return _statemap_workflow( input, diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index b3cb1b4d2..ebbd0f4fa 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -8,13 +8,17 @@ from bqskit.compiler.workflow import WorkflowLike -_compile_registry: dict[MachineModel, dict[int, Workflow]] = {} +_compile_circuit_registry: dict[MachineModel, dict[int, Workflow]] = {} +_compile_unitary_registry: dict[MachineModel, dict[int, Workflow]] = {} +_compile_stateprep_registry: dict[MachineModel, dict[int, Workflow]] = {} +_compile_statemap_registry: dict[MachineModel, dict[int, Workflow]] = {} def register_workflow( key: MachineModel, workflow: WorkflowLike, optimization_level: int, + target_type: str, ) -> None: """ Register a workflow for a given MachineModel. @@ -34,10 +38,13 @@ def register_workflow( be executed if the MachineModel in a call to `compile` matches `key`. If `key` is already registered, a warning will be logged. - optimization_level ptional[int): The optimization level with which + optimization_level (Optional[int]): The optimization level with which to register the workflow. 
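The build_workflow changes above repeat the same lookup against each of the four target-type registries. The shared shape of that lookup, as a hedged helper sketch that is not part of the actual patch:

from typing import Any, Dict, Optional

def lookup_registered_workflow(
    registry: Dict[Any, Dict[int, Any]],
    model: Any,
    optimization_level: int,
) -> Optional[Any]:
    # Return the workflow registered for (model, level), or None so the
    # caller can fall back to the built-in workflow construction.
    levels = registry.get(model)
    if levels is None:
        return None
    return levels.get(optimization_level)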
If no level is provided, the Workflow will be registered as level 1. + target_type (str): Register a workflow for targets of this type. Must + be 'circuit', 'unitary', 'stateprep', or 'statemap'. + Example: model_t = SpecificMachineModel(num_qudits, radixes) workflow = [QuickPartitioner(3), NewFangledOptimization()] @@ -47,17 +54,38 @@ def register_workflow( Raises: Warning: If a workflow for a given optimization_level is overwritten. + + ValueError: If `target_type` is not 'circuit', 'unitary', 'stateprep', + or 'statemap'. """ + if target_type not in ['circuit', 'unitary', 'stateprep', 'statemap']: + m = 'target_type must be "circuit", "unitary", "stateprep", or ' + m += f'"statemap", got {target_type}.' + raise ValueError(m) + + if target_type == 'circuit': + global _compile_circuit_registry + _compile_registry = _compile_circuit_registry + elif target_type == 'unitary': + global _compile_unitary_registry + _compile_registry = _compile_unitary_registry + elif target_type == 'stateprep': + global _compile_stateprep_registry + _compile_registry = _compile_stateprep_registry + else: + global _compile_statemap_registry + _compile_registry = _compile_statemap_registry + workflow = Workflow(workflow) - global _compile_registry new_workflow = {optimization_level: workflow} if key in _compile_registry: if optimization_level in _compile_registry[key]: m = f'Overwritting workflow for {key} at level ' - m += f'{optimization_level}. If multiple Namespace packages are ' - m += 'installed, ensure that their __init__.py files do not ' - m += 'attempt to overwrite the same default Workflows.' + m += f'{optimization_level} for target type {target_type}.' + m += 'If multiple Namespace packages are installed, ensure' + m += 'that their __init__.py files do not attempt to' + m += 'overwrite the same default Workflows.' 
warnings.warn(m) _compile_registry[key].update(new_workflow) else: diff --git a/tests/compiler/test_registry.py b/tests/compiler/test_registry.py index 6371211c9..4cc6065bc 100644 --- a/tests/compiler/test_registry.py +++ b/tests/compiler/test_registry.py @@ -9,7 +9,10 @@ from bqskit.compiler.compile import compile from bqskit.compiler.machine import MachineModel -from bqskit.compiler.registry import _compile_registry +from bqskit.compiler.registry import _compile_circuit_registry +from bqskit.compiler.registry import _compile_statemap_registry +from bqskit.compiler.registry import _compile_stateprep_registry +from bqskit.compiler.registry import _compile_unitary_registry from bqskit.compiler.registry import register_workflow from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike @@ -74,28 +77,58 @@ class TestRegisterWorkflow: @pytest.fixture(autouse=True) def setup(self) -> None: # global _compile_registry - _compile_registry.clear() + _compile_circuit_registry.clear() + _compile_unitary_registry.clear() + _compile_statemap_registry.clear() + _compile_stateprep_registry.clear() def test_register_workflow(self) -> None: - global _compile_registry - assert _compile_registry == {} + global _compile_circuit_registry + global _compile_unitary_registry + global _compile_statemap_registry + global _compile_stateprep_registry + assert _compile_circuit_registry == {} + assert _compile_unitary_registry == {} + assert _compile_statemap_registry == {} + assert _compile_stateprep_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) - workflow = [QuickPartitioner(), ScanningGateRemovalPass()] - register_workflow(machine, workflow, 1) - assert machine in _compile_registry - assert 1 in _compile_registry[machine] - assert workflow_match(_compile_registry[machine][1], workflow) + circuit_workflow = [QuickPartitioner(), ScanningGateRemovalPass()] + other_workflow = [QuickPartitioner(), QSearchSynthesisPass()] + register_workflow(machine, circuit_workflow, 1, 'circuit') + register_workflow(machine, other_workflow, 1, 'unitary') + register_workflow(machine, other_workflow, 1, 'statemap') + register_workflow(machine, other_workflow, 1, 'stateprep') + assert machine in _compile_circuit_registry + assert 1 in _compile_circuit_registry[machine] + assert workflow_match( + _compile_circuit_registry[machine][1], circuit_workflow, + ) + assert machine in _compile_unitary_registry + assert 1 in _compile_unitary_registry[machine] + assert workflow_match( + _compile_unitary_registry[machine][1], other_workflow, + ) + assert machine in _compile_statemap_registry + assert 1 in _compile_statemap_registry[machine] + assert workflow_match( + _compile_statemap_registry[machine][1], other_workflow, + ) + assert machine in _compile_stateprep_registry + assert 1 in _compile_stateprep_registry[machine] + assert workflow_match( + _compile_stateprep_registry[machine][1], other_workflow, + ) def test_custom_compile_machine(self) -> None: - global _compile_registry - assert _compile_registry == {} + global _compile_circuit_registry + assert _compile_circuit_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QuickPartitioner(2)] - register_workflow(machine, workflow, 1) + register_workflow(machine, workflow, 1, 'circuit') circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine) assert unitary_match(result, circuit) @@ 
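Putting the new registry API together, a hedged usage sketch based on the tests in this patch; the gate and pass import paths follow BQSKit's public API and are assumptions here rather than part of the diff:

from bqskit.compiler.compile import compile
from bqskit.compiler.machine import MachineModel
from bqskit.compiler.registry import register_workflow
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CZGate, HGate, RZGate
from bqskit.passes import QSearchSynthesisPass
from bqskit.passes import QuickPartitioner
from bqskit.passes import ScanningGateRemovalPass

machine = MachineModel(3, gate_set=[CZGate(), HGate(), RZGate()])

# Different target types for the same machine can now get different workflows.
register_workflow(
    machine, [QuickPartitioner(), ScanningGateRemovalPass()], 1, 'circuit',
)
register_workflow(machine, [QSearchSynthesisPass()], 1, 'unitary')

# compile() dispatches on both the model and the input type: a Circuit input
# uses the 'circuit' workflow, a UnitaryMatrix input the 'unitary' one, and
# any unregistered combination falls back to the default workflows.
circuit = Circuit(3)
result = compile(circuit, machine, optimization_level=1)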
-105,13 +138,13 @@ def test_custom_compile_machine(self) -> None: assert result.gate_counts == circuit.gate_counts def test_custom_opt_level(self) -> None: - global _compile_registry - assert _compile_registry == {} + global _compile_circuit_registry + assert _compile_circuit_registry == {} gateset = [CZGate(), HGate(), RZGate()] num_qudits = 3 machine = MachineModel(num_qudits, gate_set=gateset) workflow = [QSearchSynthesisPass()] - register_workflow(machine, workflow, 2) + register_workflow(machine, workflow, 2, 'circuit') circuit = simple_circuit(num_qudits, gateset) result = compile(circuit, machine, optimization_level=2) assert unitary_match(result, circuit) From d769fb9fb12bae6b52d1f5c6e110f773bfbd8793 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Wed, 4 Sep 2024 09:05:34 -0400 Subject: [PATCH 173/197] Fixed bug for UnitaryMatrix inputs --- bqskit/compiler/compile.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 61993c1e7..76ffa2ce3 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -625,8 +625,11 @@ def type_and_check_input(input: CompilationInputLike) -> CompilationInput: if isinstance(typed_input, Circuit): in_circuit = typed_input + elif isinstance(typed_input, UnitaryMatrix): + in_circuit = Circuit.from_unitary(typed_input) + else: - in_circuit = Circuit(1) + in_circuit = Circuit(typed_input.num_qudits, typed_input.radixes) # Perform the compilation out, data = compiler.compile(in_circuit, workflow, True) From 9fc4126511f514e0bdb11e87bb5a1149a5f663f4 Mon Sep 17 00:00:00 2001 From: Alon Kukliansky Date: Wed, 4 Sep 2024 15:53:24 -0700 Subject: [PATCH 174/197] Adding the original exception only in uncommon cases --- bqskit/compiler/compiler.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/compiler.py b/bqskit/compiler/compiler.py index 8f58f3c35..e193a68cd 100644 --- a/bqskit/compiler/compiler.py +++ b/bqskit/compiler/compiler.py @@ -406,7 +406,12 @@ def _send(self, msg: RuntimeMessage, payload: Any) -> None: except Exception as e: self.conn = None self.close() - raise RuntimeError('Server connection unexpectedly closed.') from e + if isinstance(e, (EOFError, ConnectionResetError)): + raise RuntimeError('Server connection unexpectedly closed.') + else: + raise RuntimeError( + 'Server connection unexpectedly closed.', + ) from e def _send_recv( self, From 91a91296c5b9be4342e3a7432ace0f4ea27383e0 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 5 Sep 2024 12:14:26 -0400 Subject: [PATCH 175/197] remote link specification in graph --- bqskit/qis/graph.py | 159 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 140 insertions(+), 19 deletions(-) diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index 7a7ae7c30..5439a3b40 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -14,6 +14,7 @@ from typing import Tuple from typing import TYPE_CHECKING from typing import Union +from typing import Mapping import numpy as np @@ -23,7 +24,7 @@ from bqskit.ir.location import CircuitLocation from bqskit.ir.location import CircuitLocationLike from bqskit.utils.typing import is_integer -from bqskit.utils.typing import is_iterable +from bqskit.utils.typing import is_iterable, is_mapping _logger = logging.getLogger(__name__) @@ -33,31 +34,143 @@ class CouplingGraph(Collection[Tuple[int, int]]): def __init__( self, - graph: Iterable[tuple[int, int]], + graph: CouplingGraphLike, num_qudits: int | None = None, + 
remote_edges: Iterable[tuple[int, int]] = [], + default_weight: int = 1, + default_remote_weight: int = 100, + edge_weights_overrides: Mapping[tuple[int, int], int] = {}, ) -> None: - if isinstance(graph, CouplingGraph): - self.num_qudits: int = graph.num_qudits - self._edges: set[tuple[int, int]] = graph._edges - self._adj: list[set[int]] = graph._adj - return + """ + Construct a new CouplingGraph. + + Args: + graph (CouplingGraphLike): The undirected graph edges. + + num_qudits (int | None): The number of qudits in the graph. If + None, the number of qudits is inferred from the maximum seen + in the edge list. (Default: None) + + remote_edges (Iterable[tuple[int, int]]): The edges that cross + QPU chip boundaries. Distributed QPUs will have remote links + connect them. Notes, remote edges must specified both in + `graph` and here. (Default: []) + + default_weight (int): The default weight of an edge in the + graph. (Default: 1) + + default_remote_weight (int): The default weight of a remote + edge in the graph. (Default: 100) + + edge_weights_overrides (Mapping[tuple[int, int], int]): A mapping + of edges to their weights. These override the defaults on + a case-by-case basis. (Default: {}) + + Raises: + ValueError: If `num_qudits` is too small for the edges in `graph`. + ValueError: If `num_qudits` is less than zero. + + ValueError: If any edge in `remote_edges` is not in `graph`. + + ValueError: If any edge in `edge_weights_overrides` is not in + `graph`. + """ if not CouplingGraph.is_valid_coupling_graph(graph): raise TypeError('Invalid coupling graph.') - self._edges = {g if g[0] <= g[1] else (g[1], g[0]) for g in graph} + if num_qudits is not None and not is_integer(num_qudits): + raise TypeError( + 'Expected integer for num_qudits,' + f' got {type(num_qudits)}', + ) - calced_num_qudits = 0 + if num_qudits is not None and num_qudits < 0: + raise ValueError( + 'Expected nonnegative num_qudits,' + f' got {num_qudits}.' + ) + + if not CouplingGraph.is_valid_coupling_graph(remote_edges): + raise TypeError('Invalid remote links.') + + if any(edge not in graph for edge in remote_edges): + invalids = [e for e in remote_edges if e not in graph] + raise ValueError( + f'Remote links {invalids} not in graph.' + ' All remote links must also be specified in the graph input.', + ) + + if not isinstance(default_weight, int): + raise TypeError( + 'Expected integer for default_weight,' + f' got {type(default_weight)}', + ) + + if not isinstance(default_remote_weight, int): + raise TypeError( + 'Expected integer for default_remote_weight,' + f' got {type(default_remote_weight)}', + ) + + if not is_mapping(edge_weights_overrides): + raise TypeError( + 'Expected mapping for edge_weights_overrides,' + f' got {type(edge_weights_overrides)}', + ) + + if any( + not isinstance(v, int) + for v in edge_weights_overrides.values() + ): + invalids = [ + v for v in edge_weights_overrides.values() + if not isinstance(v, int) + ] + raise TypeError( + 'Expected integer values for edge_weights_overrides,' + f' got non-integer values: {invalids}.', + ) + + if any(edge not in graph for edge in edge_weights_overrides): + invalids = [ + e for e in edge_weights_overrides + if e not in graph + ] + raise ValueError( + f'Edges {invalids} from edge_weights_overrides are not in ' + 'the graph. 
All edge_weights_overrides must also be ' + 'specified in the graph input.', + ) + + calc_num_qudits = 0 for q1, q2 in self._edges: - calced_num_qudits = max(calced_num_qudits, max(q1, q2)) - calced_num_qudits += 1 + calc_num_qudits = max(calc_num_qudits, max(q1, q2)) + calc_num_qudits += 1 - if num_qudits is None: - self.num_qudits = calced_num_qudits - elif calced_num_qudits > num_qudits: - raise ValueError('Edges between invalid qudits.') - else: - self.num_qudits = num_qudits + if num_qudits is not None and calc_num_qudits > num_qudits: + raise ValueError( + 'Edges between invalid qudits or num_qudits too small.' + ) + + if isinstance(graph, CouplingGraph): + self.num_qudits: int = graph.num_qudits + self._edges: set[tuple[int, int]] = graph._edges + self._remote_edges: set[tuple[int, int]] = graph._remote_edges + self._adj: list[set[int]] = graph._adj + self._mat: list[list[int]] = graph._mat + self.default_weight: int = graph.default_weight + self.default_remote_weight: int = graph.default_remote_weight + return + + self.num_qudits = calc_num_qudits if num_qudits is None else num_qudits + self._edges = {g if g[0] <= g[1] else (g[1], g[0]) for g in graph} + self._remote_edges = { + e if e[0] <= e[1] else (e[1], e[0]) + for e in remote_edges + } + self.default_weight = default_weight + self.default_remote_weight = default_remote_weight self._adj = [set() for _ in range(self.num_qudits)] for q1, q2 in self._edges: @@ -69,8 +182,16 @@ def __init__( for _ in range(self.num_qudits) ] for q1, q2 in self._edges: - self._mat[q1][q2] = 1 - self._mat[q2][q1] = 1 + self._mat[q1][q2] = default_weight + self._mat[q2][q1] = default_weight + + for q1, q2 in self._remote_links: + self._mat[q1][q2] = default_remote_weight + self._mat[q2][q1] = default_remote_weight + + for (q1, q2), weight in edge_weights_overrides.items(): + self._mat[q1][q2] = weight + self._mat[q2][q1] = weight def is_fully_connected(self) -> bool: """Return true if the graph is fully connected.""" From 6aa57a1f637fb244f3c49e6137878a9ac874452d Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 5 Sep 2024 12:51:19 -0400 Subject: [PATCH 176/197] Added check for target_types registered for a given MachineModel --- bqskit/compiler/registry.py | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index ebbd0f4fa..a9e0e14df 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -14,11 +14,37 @@ _compile_statemap_registry: dict[MachineModel, dict[int, Workflow]] = {} +def model_registered_target_types(key: MachineModel) -> list[str]: + """ + Return a list of target_types for which key is registered. + + Args: + key (MachineModel): A MachineModel to check for. + + Returns: + (list[str]): If `key` has been registered in any of the registry, the + name of that target type will be contained in this list. 
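The helper added here answers which target types a model has already been registered for. A tiny hedged example, assuming the public import paths used elsewhere in these patches:

from bqskit.compiler.machine import MachineModel
from bqskit.compiler.registry import model_registered_target_types
from bqskit.compiler.registry import register_workflow
from bqskit.passes import QuickPartitioner

model = MachineModel(2)
register_workflow(model, [QuickPartitioner()], 1, 'circuit')
assert model_registered_target_types(model) == ['circuit']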
+ """ + global _compile_circuit_registry + global _compile_unitary_registry + global _compile_stateprep_registry + global _compile_statemap_registry + registered_types = [] + if key in _compile_circuit_registry: + registered_types.append('circuit') + if key in _compile_unitary_registry: + registered_types.append('unitary') + if key in _compile_stateprep_registry: + registered_types.append('stateprep') + if key in _compile_statemap_registry: + registered_types.append('statemap') + return registered_types + def register_workflow( key: MachineModel, workflow: WorkflowLike, optimization_level: int, - target_type: str, + target_type: str = 'circuit', ) -> None: """ Register a workflow for a given MachineModel. @@ -38,17 +64,18 @@ def register_workflow( be executed if the MachineModel in a call to `compile` matches `key`. If `key` is already registered, a warning will be logged. - optimization_level (Optional[int]): The optimization level with which - to register the workflow. If no level is provided, the Workflow - will be registered as level 1. + optimization_level (int): The optimization level with which to + register the workflow. If no level is provided, the Workflow will + be registered as level 1. target_type (str): Register a workflow for targets of this type. Must be 'circuit', 'unitary', 'stateprep', or 'statemap'. + (Default: 'circuit') Example: model_t = SpecificMachineModel(num_qudits, radixes) workflow = [QuickPartitioner(3), NewFangledOptimization()] - register_workflow(model_t, workflow, level) + register_workflow(model_t, workflow, level, 'circuit') ... new_circuit = compile(circuit, model_t, optimization_level=level) From 5b08aa761e099203862f2022c1dc27ad4e0b9795 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 5 Sep 2024 13:03:20 -0400 Subject: [PATCH 177/197] pre-commit --- bqskit/compiler/registry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index a9e0e14df..c6d25eb5d 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -40,6 +40,7 @@ def model_registered_target_types(key: MachineModel) -> list[str]: registered_types.append('statemap') return registered_types + def register_workflow( key: MachineModel, workflow: WorkflowLike, @@ -64,7 +65,7 @@ def register_workflow( be executed if the MachineModel in a call to `compile` matches `key`. If `key` is already registered, a warning will be logged. - optimization_level (int): The optimization level with which to + optimization_level (int): The optimization level with which to register the workflow. If no level is provided, the Workflow will be registered as level 1. 
From ab0a43440af4d039e57927362e5bb9597a0774a5 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 5 Sep 2024 13:03:45 -0400 Subject: [PATCH 178/197] Warn if MachineModel is not registered for target_type, but is for others --- bqskit/compiler/compile.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/bqskit/compiler/compile.py b/bqskit/compiler/compile.py index 76ffa2ce3..4536f0697 100644 --- a/bqskit/compiler/compile.py +++ b/bqskit/compiler/compile.py @@ -18,6 +18,7 @@ from bqskit.compiler.registry import _compile_statemap_registry from bqskit.compiler.registry import _compile_stateprep_registry from bqskit.compiler.registry import _compile_unitary_registry +from bqskit.compiler.registry import model_registered_target_types from bqskit.compiler.workflow import Workflow from bqskit.compiler.workflow import WorkflowLike from bqskit.ir.circuit import Circuit @@ -675,6 +676,8 @@ def build_workflow( if model is None: model = MachineModel(input.num_qudits, radixes=input.radixes) + model_registered_types = model_registered_target_types(model) + if isinstance(input, Circuit): if input.num_qudits > max_synthesis_size: if any( @@ -696,6 +699,11 @@ def build_workflow( if model in _compile_circuit_registry: if optimization_level in _compile_circuit_registry[model]: return _compile_circuit_registry[model][optimization_level] + elif len(model_registered_types) > 0: + m = f'MachineModel {model} is registered for inputs of type in ' + m += f'{model_registered_types}, but input is {type(input)}. ' + m += f'You may need to register a Workflow for type {type(input)}.' + warnings.warn(m) return _circuit_workflow( model, @@ -718,6 +726,11 @@ def build_workflow( if model in _compile_unitary_registry: if optimization_level in _compile_unitary_registry[model]: return _compile_unitary_registry[model][optimization_level] + elif len(model_registered_types) > 0: + m = f'MachineModel {model} is registered for inputs of type in ' + m += f'{model_registered_types}, but input is {type(input)}. ' + m += f'You may need to register a Workflow for type {type(input)}.' + warnings.warn(m) return _synthesis_workflow( input, @@ -741,6 +754,11 @@ def build_workflow( if model in _compile_stateprep_registry: if optimization_level in _compile_stateprep_registry[model]: return _compile_stateprep_registry[model][optimization_level] + elif len(model_registered_types) > 0: + m = f'MachineModel {model} is registered for inputs of type in ' + m += f'{model_registered_types}, but input is {type(input)}. ' + m += f'You may need to register a Workflow for type {type(input)}.' + warnings.warn(m) return _stateprep_workflow( input, @@ -764,6 +782,11 @@ def build_workflow( if model in _compile_statemap_registry: if optimization_level in _compile_statemap_registry[model]: return _compile_statemap_registry[model][optimization_level] + elif len(model_registered_types) > 0: + m = f'MachineModel {model} is registered for inputs of type in ' + m += f'{model_registered_types}, but input is {type(input)}. ' + m += f'You may need to register a Workflow for type {type(input)}.' 
+ warnings.warn(m) return _statemap_workflow( input, From 659fae823630ab3c4d5caae00b00fd363a49595a Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 5 Sep 2024 14:48:01 -0400 Subject: [PATCH 179/197] Some progress --- bqskit/qis/graph.py | 69 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 3 deletions(-) diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index 5439a3b40..f6b1684ac 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -11,10 +11,10 @@ from typing import Iterable from typing import Iterator from typing import List +from typing import Mapping from typing import Tuple from typing import TYPE_CHECKING from typing import Union -from typing import Mapping import numpy as np @@ -88,7 +88,7 @@ def __init__( if num_qudits is not None and num_qudits < 0: raise ValueError( 'Expected nonnegative num_qudits,' - f' got {num_qudits}.' + f' got {num_qudits}.', ) if not CouplingGraph.is_valid_coupling_graph(remote_edges): @@ -150,7 +150,7 @@ def __init__( if num_qudits is not None and calc_num_qudits > num_qudits: raise ValueError( - 'Edges between invalid qudits or num_qudits too small.' + 'Edges between invalid qudits or num_qudits too small.', ) if isinstance(graph, CouplingGraph): @@ -193,6 +193,69 @@ def __init__( self._mat[q1][q2] = weight self._mat[q2][q1] = weight + def is_distributed(self) -> bool: + """Return true if the graph represents multiple connected QPUs.""" + return len(self._remote_edges) > 0 + + def qpu_count(self) -> int: + """Return the number of connected QPUs.""" + return len(self.get_qpu_to_qudit_map()) + + def get_individual_qpu_graphs(self) -> list[CouplingGraph]: + """Return a list of individual QPU graphs.""" + if not self.is_distributed(): + return [self] + + qpu_to_qudit = self.get_qpu_to_qudit_map() + return [self.get_subgraph(qpu) for qpu in qpu_to_qudit] + + def get_qpu_to_qudit_map(self) -> list[list[int]]: + """Return a mapping of QPU indices to qudit indices.""" + # TODO: Cache this? + seen = set() + qpus = [] + for qudit in range(self.num_qudits): + if qudit in seen: + continue + qpu = [] + frontier = {qudit} + while len(frontier) > 0: + node = frontier.pop() + qpu.append(node) + seen.add(node) + for neighbor in self._adj[node]: + if (node, neighbor) in self._remote_edges: + continue + if (neighbor, node) in self._remote_edges: + continue + if neighbor not in seen: + frontier.add(neighbor) + qpus.append(qpu) + # TODO: Assumes that the individual qpus are connected + # If seen is not everything, throw an error? 
+ return qpus + + def get_qudit_to_qpu_map(self) -> dict[int, int]: + """Return a mapping of qudit indices to QPU indices.""" + qpu_to_qudit = self.get_qpu_to_qudit_map() + qudit_to_qpu = {} + for qpu, qudits in enumerate(qpu_to_qudit): + for qudit in qudits: + qudit_to_qpu[qudit] = qpu + return qudit_to_qpu + + def get_qpu_connectivity(self) -> list[list[int]]: + """Return the adjacency list of the QPUs.""" + qpu_to_qudit = self.get_qpu_to_qudit_map() + qudit_to_qpu = self.get_qudit_to_qpu_map() + qpu_adj = [set() for _ in range(len(qpu_to_qudit))] + for q1, q2 in self._remote_edges: + qpu1 = qudit_to_qpu[q1] + qpu2 = qudit_to_qpu[q2] + qpu_adj[qpu1].add(qpu2) + qpu_adj[qpu2].add(qpu1) + return qpu_adj + def is_fully_connected(self) -> bool: """Return true if the graph is fully connected.""" frontier: set[int] = {0} From 2df5770b4a5ddf9a0ed4251dcd708c260ebbdec6 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sun, 8 Sep 2024 09:00:41 -0400 Subject: [PATCH 180/197] Check for error --- bqskit/qis/graph.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index f6b1684ac..834599f13 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -231,8 +231,13 @@ def get_qpu_to_qudit_map(self) -> list[list[int]]: if neighbor not in seen: frontier.add(neighbor) qpus.append(qpu) - # TODO: Assumes that the individual qpus are connected - # If seen is not everything, throw an error? + + if len(seen) != self.num_qudits: + raise RuntimeError( + 'Graph is not fully connected and pathological' + ' for distributed subroutines.' + ) + return qpus def get_qudit_to_qpu_map(self) -> dict[int, int]: From 6c18c1fa1ed84975692dd67b7f3504d45549d1ef Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Sun, 8 Sep 2024 11:12:09 -0400 Subject: [PATCH 181/197] Edge weights to floats --- bqskit/passes/mapping/pam.py | 4 +-- bqskit/passes/mapping/sabre.py | 8 +++--- bqskit/qis/graph.py | 48 ++++++++++++++++------------------ 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/bqskit/passes/mapping/pam.py b/bqskit/passes/mapping/pam.py index 549ab2b6b..92127a2c3 100644 --- a/bqskit/passes/mapping/pam.py +++ b/bqskit/passes/mapping/pam.py @@ -277,7 +277,7 @@ def _get_best_perm( cg: CouplingGraph, F: set[CircuitPoint], pi: list[int], - D: list[list[int]], + D: list[list[float]], E: set[CircuitPoint], qudits: Sequence[int], ) -> tuple[tuple[int, ...], Circuit, tuple[int, ...]]: @@ -366,7 +366,7 @@ def _score_perm( circuit: Circuit, F: set[CircuitPoint], pi: list[int], - D: list[list[int]], + D: list[list[float]], perm: tuple[Sequence[int], Sequence[int]], E: set[CircuitPoint], ) -> float: diff --git a/bqskit/passes/mapping/sabre.py b/bqskit/passes/mapping/sabre.py index b257fda84..27d52c0da 100644 --- a/bqskit/passes/mapping/sabre.py +++ b/bqskit/passes/mapping/sabre.py @@ -363,7 +363,7 @@ def _get_best_swap( circuit: Circuit, F: set[CircuitPoint], E: set[CircuitPoint], - D: list[list[int]], + D: list[list[float]], cg: CouplingGraph, pi: list[int], decay: list[float], @@ -416,7 +416,7 @@ def _score_swap( circuit: Circuit, F: set[CircuitPoint], pi: list[int], - D: list[list[int]], + D: list[list[float]], swap: tuple[int, int], decay: list[float], E: set[CircuitPoint], @@ -475,7 +475,7 @@ def _get_distance( self, logical_qudits: Sequence[int], pi: list[int], - D: list[list[int]], + D: list[list[float]], ) -> float: """Calculate the expected number of swaps to connect logical qudits.""" min_term = np.inf @@ -493,7 +493,7 @@ def _uphill_swaps( 
logical_qudits: Sequence[int], cg: CouplingGraph, pi: list[int], - D: list[list[int]], + D: list[list[float]], ) -> Iterator[tuple[int, int]]: """Yield the swaps necessary to bring some of the qudits together.""" center_qudit = min( diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index 834599f13..2c0d31fab 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -6,11 +6,9 @@ import logging from random import shuffle from typing import Any -from typing import cast from typing import Collection from typing import Iterable from typing import Iterator -from typing import List from typing import Mapping from typing import Tuple from typing import TYPE_CHECKING @@ -24,7 +22,7 @@ from bqskit.ir.location import CircuitLocation from bqskit.ir.location import CircuitLocationLike from bqskit.utils.typing import is_integer -from bqskit.utils.typing import is_iterable, is_mapping +from bqskit.utils.typing import is_iterable, is_mapping, is_real_number _logger = logging.getLogger(__name__) @@ -37,9 +35,9 @@ def __init__( graph: CouplingGraphLike, num_qudits: int | None = None, remote_edges: Iterable[tuple[int, int]] = [], - default_weight: int = 1, - default_remote_weight: int = 100, - edge_weights_overrides: Mapping[tuple[int, int], int] = {}, + default_weight: float = 1.0, + default_remote_weight: float = 100.0, + edge_weights_overrides: Mapping[tuple[int, int], float] = {}, ) -> None: """ Construct a new CouplingGraph. @@ -56,13 +54,13 @@ def __init__( connect them. Notes, remote edges must specified both in `graph` and here. (Default: []) - default_weight (int): The default weight of an edge in the - graph. (Default: 1) + default_weight (float): The default weight of an edge in the + graph. (Default: 1.0) - default_remote_weight (int): The default weight of a remote - edge in the graph. (Default: 100) + default_remote_weight (float): The default weight of a remote + edge in the graph. (Default: 100.0) - edge_weights_overrides (Mapping[tuple[int, int], int]): A mapping + edge_weights_overrides (Mapping[tuple[int, int], float]): A mapping of edges to their weights. These override the defaults on a case-by-case basis. 
(Default: {}) @@ -101,13 +99,13 @@ def __init__( ' All remote links must also be specified in the graph input.', ) - if not isinstance(default_weight, int): + if not is_real_number(default_weight): raise TypeError( 'Expected integer for default_weight,' f' got {type(default_weight)}', ) - if not isinstance(default_remote_weight, int): + if not is_real_number(default_remote_weight): raise TypeError( 'Expected integer for default_remote_weight,' f' got {type(default_remote_weight)}', @@ -120,12 +118,12 @@ def __init__( ) if any( - not isinstance(v, int) + not is_real_number(v) for v in edge_weights_overrides.values() ): invalids = [ v for v in edge_weights_overrides.values() - if not isinstance(v, int) + if not is_real_number(v) ] raise TypeError( 'Expected integer values for edge_weights_overrides,' @@ -158,9 +156,9 @@ def __init__( self._edges: set[tuple[int, int]] = graph._edges self._remote_edges: set[tuple[int, int]] = graph._remote_edges self._adj: list[set[int]] = graph._adj - self._mat: list[list[int]] = graph._mat - self.default_weight: int = graph.default_weight - self.default_remote_weight: int = graph.default_remote_weight + self._mat: list[list[float]] = graph._mat + self.default_weight: float = graph.default_weight + self.default_remote_weight: float = graph.default_remote_weight return self.num_qudits = calc_num_qudits if num_qudits is None else num_qudits @@ -185,7 +183,7 @@ def __init__( self._mat[q1][q2] = default_weight self._mat[q2][q1] = default_weight - for q1, q2 in self._remote_links: + for q1, q2 in self._remote_edges: self._mat[q1][q2] = default_remote_weight self._mat[q2][q1] = default_remote_weight @@ -235,7 +233,7 @@ def get_qpu_to_qudit_map(self) -> list[list[int]]: if len(seen) != self.num_qudits: raise RuntimeError( 'Graph is not fully connected and pathological' - ' for distributed subroutines.' + ' for distributed subroutines.', ) return qpus @@ -249,11 +247,11 @@ def get_qudit_to_qpu_map(self) -> dict[int, int]: qudit_to_qpu[qudit] = qpu return qudit_to_qpu - def get_qpu_connectivity(self) -> list[list[int]]: + def get_qpu_connectivity(self) -> list[set[int]]: """Return the adjacency list of the QPUs.""" qpu_to_qudit = self.get_qpu_to_qudit_map() qudit_to_qpu = self.get_qudit_to_qpu_map() - qpu_adj = [set() for _ in range(len(qpu_to_qudit))] + qpu_adj: list[set[int]] = [set() for _ in range(len(qpu_to_qudit))] for q1, q2 in self._remote_edges: qpu1 = qudit_to_qpu[q1] qpu2 = qudit_to_qpu[q2] @@ -339,12 +337,12 @@ def __repr__(self) -> str: def get_qudit_degrees(self) -> list[int]: return [len(l) for l in self._adj] - def all_pairs_shortest_path(self) -> list[list[int]]: + def all_pairs_shortest_path(self) -> list[list[float]]: """ Calculate all pairs shortest path matrix using Floyd-Warshall. Returns: - D (list[list[int]]): D[i][j] is the length of the shortest + D (list[list[float]]): D[i][j] is the length of the shortest path from i to j. 
""" D = copy.deepcopy(self._mat) @@ -352,7 +350,7 @@ def all_pairs_shortest_path(self) -> list[list[int]]: for i in range(self.num_qudits): for j in range(self.num_qudits): D[i][j] = min(D[i][j], D[i][k] + D[k][j]) - return cast(List[List[int]], D) + return D def get_shortest_path_tree(self, source: int) -> list[tuple[int, ...]]: """Return shortest path from `source` to every node in `self`.""" From 502cd197d5df8dae1719540bfff160e221397628 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 9 Sep 2024 08:19:07 -0400 Subject: [PATCH 182/197] More tests and cleanup --- bqskit/qis/graph.py | 79 +++++++-------- tests/qis/test_graph.py | 208 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 244 insertions(+), 43 deletions(-) diff --git a/bqskit/qis/graph.py b/bqskit/qis/graph.py index 2c0d31fab..54e0664ec 100644 --- a/bqskit/qis/graph.py +++ b/bqskit/qis/graph.py @@ -141,16 +141,6 @@ def __init__( 'specified in the graph input.', ) - calc_num_qudits = 0 - for q1, q2 in self._edges: - calc_num_qudits = max(calc_num_qudits, max(q1, q2)) - calc_num_qudits += 1 - - if num_qudits is not None and calc_num_qudits > num_qudits: - raise ValueError( - 'Edges between invalid qudits or num_qudits too small.', - ) - if isinstance(graph, CouplingGraph): self.num_qudits: int = graph.num_qudits self._edges: set[tuple[int, int]] = graph._edges @@ -161,6 +151,16 @@ def __init__( self.default_remote_weight: float = graph.default_remote_weight return + calc_num_qudits = 0 + for q1, q2 in graph: + calc_num_qudits = max(calc_num_qudits, max(q1, q2)) + calc_num_qudits += 1 + + if num_qudits is not None and calc_num_qudits > num_qudits: + raise ValueError( + 'Edges between invalid qudits or num_qudits too small.', + ) + self.num_qudits = calc_num_qudits if num_qudits is None else num_qudits self._edges = {g if g[0] <= g[1] else (g[1], g[0]) for g in graph} self._remote_edges = { @@ -191,6 +191,30 @@ def __init__( self._mat[q1][q2] = weight self._mat[q2][q1] = weight + def get_qpu_to_qudit_map(self) -> list[list[int]]: + """Return a mapping of QPU indices to qudit indices.""" + if not hasattr(self, '_qpu_to_qudit'): + seen = set() + self._qpu_to_qudit = [] + for qudit in range(self.num_qudits): + if qudit in seen: + continue + qpu = [] + frontier = {qudit} + while len(frontier) > 0: + node = frontier.pop() + qpu.append(node) + seen.add(node) + for neighbor in self._adj[node]: + if (node, neighbor) in self._remote_edges: + continue + if (neighbor, node) in self._remote_edges: + continue + if neighbor not in seen: + frontier.add(neighbor) + self._qpu_to_qudit.append(qpu) + return self._qpu_to_qudit + def is_distributed(self) -> bool: """Return true if the graph represents multiple connected QPUs.""" return len(self._remote_edges) > 0 @@ -207,45 +231,14 @@ def get_individual_qpu_graphs(self) -> list[CouplingGraph]: qpu_to_qudit = self.get_qpu_to_qudit_map() return [self.get_subgraph(qpu) for qpu in qpu_to_qudit] - def get_qpu_to_qudit_map(self) -> list[list[int]]: - """Return a mapping of QPU indices to qudit indices.""" - # TODO: Cache this? 
- seen = set() - qpus = [] - for qudit in range(self.num_qudits): - if qudit in seen: - continue - qpu = [] - frontier = {qudit} - while len(frontier) > 0: - node = frontier.pop() - qpu.append(node) - seen.add(node) - for neighbor in self._adj[node]: - if (node, neighbor) in self._remote_edges: - continue - if (neighbor, node) in self._remote_edges: - continue - if neighbor not in seen: - frontier.add(neighbor) - qpus.append(qpu) - - if len(seen) != self.num_qudits: - raise RuntimeError( - 'Graph is not fully connected and pathological' - ' for distributed subroutines.', - ) - - return qpus - - def get_qudit_to_qpu_map(self) -> dict[int, int]: + def get_qudit_to_qpu_map(self) -> list[int]: """Return a mapping of qudit indices to QPU indices.""" qpu_to_qudit = self.get_qpu_to_qudit_map() qudit_to_qpu = {} for qpu, qudits in enumerate(qpu_to_qudit): for qudit in qudits: qudit_to_qpu[qudit] = qpu - return qudit_to_qpu + return list(qudit_to_qpu.values()) def get_qpu_connectivity(self) -> list[set[int]]: """Return the adjacency list of the QPUs.""" diff --git a/tests/qis/test_graph.py b/tests/qis/test_graph.py index 5a205e8ee..158238959 100644 --- a/tests/qis/test_graph.py +++ b/tests/qis/test_graph.py @@ -1,9 +1,217 @@ """This module tests the CouplingGraph class.""" from __future__ import annotations +from typing import Any + import pytest from bqskit.qis.graph import CouplingGraph +from bqskit.qis.graph import CouplingGraphLike + + +def test_coupling_graph_init_valid() -> None: + # Test with valid inputs + graph = {(0, 1), (1, 2), (2, 3)} + num_qudits = 4 + remote_edges = [(1, 2)] + default_weight = 1.0 + default_remote_weight = 10.0 + edge_weights_overrides = {(1, 2): 0.5} + + coupling_graph = CouplingGraph( + graph, + num_qudits, + remote_edges, + default_weight, + default_remote_weight, + edge_weights_overrides, + ) + + assert coupling_graph.num_qudits == num_qudits + assert coupling_graph._edges == graph + assert coupling_graph._remote_edges == set(remote_edges) + assert coupling_graph.default_weight == default_weight + assert coupling_graph.default_remote_weight == default_remote_weight + assert all( + coupling_graph._mat[q1][q2] == weight + for (q1, q2), weight in edge_weights_overrides.items() + ) + + +@pytest.mark.parametrize( + 'graph, num_qudits, remote_edges, default_weight, default_remote_weight,' + ' edge_weights_overrides, expected_exception', + [ + # Invalid graph + (None, 4, [], 1.0, 100.0, {}, TypeError), + # num_qudits is not an integer + ({(0, 1)}, '4', [], 1.0, 100.0, {}, TypeError), + # num_qudits is negative + ({(0, 1)}, -1, [], 1.0, 100.0, {}, ValueError), + # Invalid remote_edges + ({(0, 1)}, 4, None, 1.0, 100.0, {}, TypeError), + # Remote edge not in graph + ({(0, 1)}, 4, [(1, 2)], 1.0, 100.0, {}, ValueError), + # Invalid default_weight + ({(0, 1)}, 4, [], '1.0', 100.0, {}, TypeError), + # Invalid default_remote_weight + ({(0, 1)}, 4, [], 1.0, '100.0', {}, TypeError), + # Invalid edge_weights_overrides + ({(0, 1)}, 4, [], 1.0, 100.0, None, TypeError), + # Non-integer value in edge_weights_overrides + ({(0, 1)}, 4, [], 1.0, 100.0, {(0, 1): '0.5'}, TypeError), + # Edge in edge_weights_overrides not in graph + ({(0, 1)}, 4, [], 1.0, 100.0, {(1, 2): 0.5}, ValueError), + ], +) +def test_coupling_graph_init_invalid( + graph: CouplingGraphLike, + num_qudits: Any, + remote_edges: Any, + default_weight: Any, + default_remote_weight: Any, + edge_weights_overrides: Any, + expected_exception: Exception, +) -> None: + with pytest.raises(expected_exception): + CouplingGraph( + 
graph, + num_qudits, + remote_edges, + default_weight, + default_remote_weight, + edge_weights_overrides, + ) + + +def test_get_qpu_to_qudit_map_single_qpu() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + expected_map = [[0, 1, 2, 3]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_get_qpu_to_qudit_map_multiple_qpus() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + expected_map = [[0, 1], [2, 3]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_get_qpu_to_qudit_map_disconnected() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (3, 4)], remote_edges=[(1, 2)]) + expected_map = [[0, 1], [2], [3, 4]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_get_qpu_to_qudit_map_empty_graph() -> None: + graph = CouplingGraph([]) + expected_map = [[0]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_get_qpu_to_qudit_map_complex_topology() -> None: + graph = CouplingGraph( + [(0, 1), (1, 2), (0, 2), (2, 5), (3, 4), (4, 5), (3, 5)], + remote_edges=[(2, 5)], + ) + expected_map = [[0, 1, 2], [3, 4, 5]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_get_qudit_to_qpu_map_three_qpu() -> None: + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)], + remote_edges=[(2, 3), (5, 6)], + ) + expected_map = [[0, 1, 2], [3, 4, 5], [6, 7]] + assert graph.get_qpu_to_qudit_map() == expected_map + + +def test_is_distributed() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + assert not graph.is_distributed() + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + assert graph.is_distributed() + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + assert graph.is_distributed() + + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3)], + remote_edges=[(1, 2), (2, 3)], + ) + assert graph.is_distributed() + + +def test_qpu_count() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + assert graph.qpu_count() == 1 + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + assert graph.qpu_count() == 2 + + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3)], + remote_edges=[(1, 2), (2, 3)], + ) + assert graph.qpu_count() == 3 + + graph = CouplingGraph([]) + assert graph.qpu_count() == 1 + + +def test_get_individual_qpu_graphs() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + qpus = graph.get_individual_qpu_graphs() + assert len(qpus) == 1 + assert qpus[0] == graph + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + qpus = graph.get_individual_qpu_graphs() + assert len(qpus) == 2 + assert qpus[0] == CouplingGraph([(0, 1)]) + assert qpus[1] == CouplingGraph([(0, 1)]) + + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3)], + remote_edges=[(1, 2), (2, 3)], + ) + qpus = graph.get_individual_qpu_graphs() + assert len(qpus) == 3 + assert qpus[0] == CouplingGraph([(0, 1)]) + assert qpus[1] == CouplingGraph([]) + assert qpus[2] == CouplingGraph([]) + + +def test_get_qudit_to_qpu_map() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + assert graph.get_qudit_to_qpu_map() == [0, 0, 0, 0] + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + assert graph.get_qudit_to_qpu_map() == [0, 0, 1, 1] + + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3)], + remote_edges=[(1, 2), (2, 3)], + ) + assert graph.get_qudit_to_qpu_map() == [0, 0, 1, 2] + + graph = CouplingGraph([]) + assert graph.get_qudit_to_qpu_map() == [0] + + +def 
test_get_qpu_connectivity() -> None: + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)]) + assert graph.get_qpu_connectivity() == [set()] + + graph = CouplingGraph([(0, 1), (1, 2), (2, 3)], remote_edges=[(1, 2)]) + assert graph.get_qpu_connectivity() == [{1}, {0}] + + graph = CouplingGraph( + [(0, 1), (1, 2), (2, 3)], + remote_edges=[(1, 2), (2, 3)], + ) + assert graph.get_qpu_connectivity() == [{1}, {0, 2}, {1}] class TestGraphGetSubgraphsOfSize: From 5f219aace5980a4841c61cb1e9fcefec9d5f8152 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Mon, 9 Sep 2024 08:19:27 -0400 Subject: [PATCH 183/197] Distributed Predicate --- .../passes/control/predicates/distributed.py | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 bqskit/passes/control/predicates/distributed.py diff --git a/bqskit/passes/control/predicates/distributed.py b/bqskit/passes/control/predicates/distributed.py new file mode 100644 index 000000000..a1ceb2275 --- /dev/null +++ b/bqskit/passes/control/predicates/distributed.py @@ -0,0 +1,26 @@ +"""This module implements the DistributedPredicate class.""" +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +from bqskit.passes.control.predicate import PassPredicate + +if TYPE_CHECKING: + from bqskit.compiler.passdata import PassData + from bqskit.ir.circuit import Circuit + +_logger = logging.getLogger(__name__) + + +class DistributedPredicate(PassPredicate): + """ + The DistributedPredicate class. + + The DistributedPredicate returns true if the targeted machine is distributed + across multiple chips. + """ + + def get_truth_value(self, circuit: Circuit, data: PassData) -> bool: + """Call this predicate, see :class:`PassPredicate` for more info.""" + return data.model.coupling_graph.is_distributed() From e0dbf3eca2df105b3cc0b26c8117f3cd73c433d1 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Tue, 10 Sep 2024 17:24:17 -0400 Subject: [PATCH 184/197] fix circuitgate parameter counting issue --- bqskit/ir/gates/circuitgate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bqskit/ir/gates/circuitgate.py b/bqskit/ir/gates/circuitgate.py index 8c33c4b52..d870b7931 100644 --- a/bqskit/ir/gates/circuitgate.py +++ b/bqskit/ir/gates/circuitgate.py @@ -137,6 +137,7 @@ def get_qasm_gate_def(self) -> str: ', '.join([str(p) for p in params]), ', q'.join([str(q) for q in op.location]), ).replace('()', '') + param_index += op.num_params ret += '}\n' return ret From c33fda2db8255a561a6c7402724d7af3c714f9c9 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 11 Sep 2024 10:45:55 -0400 Subject: [PATCH 185/197] Update Models --- bqskit/ext/__init__.py | 9 +++++++++ bqskit/ext/honeywell.py | 23 ----------------------- bqskit/ext/quantinuum.py | 5 +++-- bqskit/ext/rigetti.py | 13 +++++++++++++ 4 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 bqskit/ext/honeywell.py diff --git a/bqskit/ext/__init__.py b/bqskit/ext/__init__.py index 131eec208..d2381085c 100644 --- a/bqskit/ext/__init__.py +++ b/bqskit/ext/__init__.py @@ -18,8 +18,11 @@ Aspen11Model AspenM2Model + ANKAA2Model + ANKAA9Q3Model H1_1Model H1_2Model + H2_1Model Sycamore23Model SycamoreModel model_from_backend @@ -64,8 +67,11 @@ from bqskit.ext.qiskit.translate import qiskit_to_bqskit from bqskit.ext.quantinuum import H1_1Model from bqskit.ext.quantinuum import H1_2Model +from bqskit.ext.quantinuum import H2_1Model from bqskit.ext.qutip.translate import bqskit_to_qutip from bqskit.ext.qutip.translate import qutip_to_bqskit +from bqskit.ext.rigetti import ANKAA2Model 
+from bqskit.ext.rigetti import ANKAA9Q3Model from bqskit.ext.rigetti import Aspen11Model from bqskit.ext.rigetti import AspenM2Model from bqskit.ext.supermarq import supermarq_critical_depth @@ -95,6 +101,9 @@ 'AspenM2Model', 'H1_1Model', 'H1_2Model', + 'H2_1Model', + 'ANKAA2Model', + 'ANKAA9Q3Model', 'Sycamore23Model', 'SycamoreModel', ] diff --git a/bqskit/ext/honeywell.py b/bqskit/ext/honeywell.py deleted file mode 100644 index af0aa7537..000000000 --- a/bqskit/ext/honeywell.py +++ /dev/null @@ -1,23 +0,0 @@ -"""This module implemenets Honeywell QPU models.""" -from __future__ import annotations - -import warnings - -from bqskit.compiler.machine import MachineModel -from bqskit.ir.gate import Gate -from bqskit.ir.gates.constant.zz import ZZGate -from bqskit.ir.gates.parameterized.rz import RZGate -from bqskit.ir.gates.parameterized.u1q import U1qPi2Gate -from bqskit.ir.gates.parameterized.u1q import U1qPiGate - -warnings.warn( - 'Honeywell Quantum is now Quantinuum. Please use the ' - 'Quantinuum QPU models and gate sets instead. This warning will become' - 'an error in a future version of BQSKit.', - DeprecationWarning, -) - -honeywell_gate_set: set[Gate] = {U1qPiGate, U1qPi2Gate, RZGate(), ZZGate()} - -H1_1Model = MachineModel(20, None, honeywell_gate_set) -H1_2Model = MachineModel(12, None, honeywell_gate_set) diff --git a/bqskit/ext/quantinuum.py b/bqskit/ext/quantinuum.py index a24a062de..948fe380f 100644 --- a/bqskit/ext/quantinuum.py +++ b/bqskit/ext/quantinuum.py @@ -3,12 +3,13 @@ from bqskit.compiler.machine import MachineModel from bqskit.ir.gate import Gate -from bqskit.ir.gates.constant.zz import ZZGate from bqskit.ir.gates.parameterized.rz import RZGate +from bqskit.ir.gates.parameterized.rzz import RZZGate from bqskit.ir.gates.parameterized.u1q import U1qPi2Gate from bqskit.ir.gates.parameterized.u1q import U1qPiGate -quantinuum_gate_set: set[Gate] = {U1qPiGate, U1qPi2Gate, RZGate(), ZZGate()} +quantinuum_gate_set: set[Gate] = {U1qPiGate, U1qPi2Gate, RZGate(), RZZGate()} H1_1Model = MachineModel(20, None, quantinuum_gate_set) H1_2Model = MachineModel(20, None, quantinuum_gate_set) +H2_1Model = MachineModel(56, None, quantinuum_gate_set) diff --git a/bqskit/ext/rigetti.py b/bqskit/ext/rigetti.py index 15bba867a..2b2c322cf 100644 --- a/bqskit/ext/rigetti.py +++ b/bqskit/ext/rigetti.py @@ -3,6 +3,7 @@ from bqskit.compiler.machine import MachineModel from bqskit.ir.gates.constant.cz import CZGate +from bqskit.ir.gates.constant.iswap import ISwapGate from bqskit.ir.gates.constant.sx import SXGate from bqskit.ir.gates.constant.x import XGate from bqskit.ir.gates.parameterized.rz import RZGate @@ -10,6 +11,8 @@ rigetti_gate_set = {SXGate(), XGate(), RZGate(), CZGate()} +ankaa_gate_set = {SXGate(), XGate(), RZGate(), CZGate(), ISwapGate()} + _aspen_11_coupling_graph = CouplingGraph([ # Ring 1 (0, 1), (1, 2), (2, 3), (3, 4), @@ -79,10 +82,20 @@ _aspen_m2_coupling_graph = CouplingGraph(_links) """Retrieved August 31, 2022: https://qcs.rigetti.com/qpus.""" +_ankaa_9q_3_coupling_graph = CouplingGraph.grid(3, 3) +"""Retrieved September 11, 2024: https://qcs.rigetti.com/qpus.""" + +_ankaa_2_coupling_graph = CouplingGraph.grid(7, 12) +"""Retrieved September 11, 2024: https://qcs.rigetti.com/qpus.""" + Aspen11Model = MachineModel(40, _aspen_11_coupling_graph, rigetti_gate_set) """A BQSKit MachineModel for Rigetti's Aspen-11 quantum processor.""" AspenM2Model = MachineModel(80, _aspen_m2_coupling_graph, rigetti_gate_set) """A BQSKit MachineModel for Rigetti's Aspen-M-2 quantum 
processor.""" +ANKAA2Model = MachineModel(84, _ankaa_2_coupling_graph, ankaa_gate_set) + +ANKAA9Q3Model = MachineModel(9, _ankaa_9q_3_coupling_graph, ankaa_gate_set) + __all__ = ['Aspen11Model', 'AspenM2Model'] From 9629e3c4f233bbe5378d39145f22744632a3cb67 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Wed, 11 Sep 2024 17:48:10 -0400 Subject: [PATCH 186/197] Update docstring --- bqskit/compiler/registry.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bqskit/compiler/registry.py b/bqskit/compiler/registry.py index c6d25eb5d..9a2d5d452 100644 --- a/bqskit/compiler/registry.py +++ b/bqskit/compiler/registry.py @@ -66,8 +66,7 @@ def register_workflow( `key`. If `key` is already registered, a warning will be logged. optimization_level (int): The optimization level with which to - register the workflow. If no level is provided, the Workflow will - be registered as level 1. + register the workflow. target_type (str): Register a workflow for targets of this type. Must be 'circuit', 'unitary', 'stateprep', or 'statemap'. From 15c05fe8eb8d8757a9a9ef10491150fb46e695c4 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 12 Sep 2024 09:16:13 -0400 Subject: [PATCH 187/197] Added Gate Guide --- docs/guides/customgate.md | 168 ++++++++++++++++++++++++++++++++++++++ docs/index.rst | 3 +- 2 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 docs/guides/customgate.md diff --git a/docs/guides/customgate.md b/docs/guides/customgate.md new file mode 100644 index 000000000..203f0154d --- /dev/null +++ b/docs/guides/customgate.md @@ -0,0 +1,168 @@ +# Implementing a Custom Gate + +BQSKit's claims great portability, and as such, most algorithms in BQSKit can +work natively with any gate set. We have included many commonly used gates +inside of the [`bqskit.ir.gates`](https://bqskit.readthedocs.io/en/latest/source/ir.html#module-bqskit.ir.gates) +subpackage, but you may want to experiment with your own gates. In this tutorial, +we will implement a custom gate in BQSKit. Since BQSKit's algorithms are built +on numerical instantiation, this process is usually as simple as defining a new +subclass with a unitary at a high-level. + +For example, let's look at the `TGate` definition in BQSKit: + +```python +... +class TGate(ConstantGate, QubitGate): + _num_qudits = 1 + _qasm_name = 't' + _utry = UnitaryMatrix( + [ + [1, 0], + [0, cmath.exp(1j * cmath.pi / 4)], + ], + ) +``` + +A gate is defined by subclassing [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate), +however, there are some abstract subclasses that can extended instead to simplify the process. For example, the `TGate` is a subclass of +[`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate) and +[`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate). The `ConstantGate` +subclass is used for gates that have a fixed unitary matrix, and the `QubitGate` subclass is used for gates that act only on qubits -- rather than qudits. In the following sections, the process of defining a custom gate will be explained in more detail. + +## Defining a Custom Gate + +To define a custom gate, you need to subclass [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate), and +define all the required attributes. These attributes can be defined as instance variables, class variables, or through methods. 
The following +attributes are required: + +- `_num_qudits`: The number of qudits the gate acts on. +- `_num_params`: The number of parameters the gate takes. +- `_radixes`: The radixes of the qudits this gate acts on. This is a list of integers, where each integer is the radix of the corresponding qudit. For example, `[2, 2]` would be a 2-qubit gate, `[3, 3]` would be a 2-qutrit gate, and `[2, 3, 3]` would be a gate that acts on a qubit and two qutrits. +- `_name`: The name of the gate. This used during print operations. +- `_qasm_name`: The name of the gate in QASM. (Qubit only gates, should use lowercase, optional) + +Additionally, you will need to override the abstract method [`get_unitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.get_unitary.html#bqskit.qis.Unitary.get_unitary). This method maps the parameters of the gate to a unitary matrix. + +Here is an example of a custom gate that acts on a single qubit: + +```python +import cmath +from bqskit.ir.gate import Gate +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.qis.unitary.unitary import RealVector + +class MyGate(Gate): + _num_qudits = 1 + _num_params = 1 + _radixes = (2,) + _name = 'MyGate' + _qasm_name = 'mygate' + + def get_unitary(self, params: RealVector) -> UnitaryMatrix: + theta = params[0] + return UnitaryMatrix( + [ + [cmath.exp(1j * theta / 2), 0], + [0, cmath.exp(-1j * theta / 2)], + ], + ) +``` + +Note that the `params` argument is a [`RealVector`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.RealVector.html#bqskit.qis.RealVector) object, which is an alias for many types of float arrays. There is a helper method in the `Gate` class hierarchy called [`check_parameters`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.check_parameters.html#bqskit.qis.Unitary.check_parameters) that can be used to validate the parameters before using them. This will check for the correct types and lengths of the parameters: + +```python +... + def get_unitary(self, params: RealVector) -> UnitaryMatrix: + self.check_parameters(params) + ... + return UnitaryMatrix( + ... + ) +``` + +As mentioned previously, the required attributes can be defined as class variables, like in the above example, or as instance variables. The following example shows how to define the same gate with instance variables: + +```python +import cmath +from bqskit.ir.gate import Gate +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.qis.unitary.unitary import RealVector + +class MyGate(Gate): + def __init__(self, num_qudits: int) -> None: + self._num_qudits = num_qudits + self._num_params = 1 + self._radixes = tuple([2] * num_qudits) + self._name = 'MyGate' + + def get_unitary(self, params: RealVector) -> UnitaryMatrix: + self.check_parameters(params) + theta = params[0] + base = UnitaryMatrix( + [ + [cmath.exp(1j * theta / 2), 0], + [0, cmath.exp(-1j * theta / 2)], + ], + ) + base.otimes(*[base] * (self._num_qudits - 1)) # base tensor product with itself +``` + +This style is helpful when the gate's attributes are dependent on the constructor arguments. The `get_unitary` method should be implemented in the same way as before. + + +## Utilizing Helper Classes + +BQSKit provides some helper classes to simplify the process of defining gates. In the first example of this guide, we used the `ConstantGate` and `QubitGate` helper classes. To use these helper subclasses, we will subclass them instead of `Gate`. 
The following are the current helper classes available: + +- [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate): A gate that has a fixed unitary matrix with no parameters. This will automatically set `_num_params` to 0, and swap the `get_unitary` method for a `_utry` variable. Additionally, these gates are made to be differentiable trivially. +- [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate): A gate that acts only on qubits. This defines `_radixes` to be all `2`s. +- [`QutritGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QutritGate.html#bqskit.ir.QutritGate): A gate that acts on qutrits. This defines `_radixes` to be all `3`s. +- [`QuditGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QuditGate.html#bqskit.ir.QuditGate): A gate that acts on qudits of the same radix. This swaps the `_radixes` requirement for a `_radix` requirement. This is useful for gates that act on qudits of the same radix, but not necessarily only qubits or qutrits. +- [`ComposedGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ComposedGate.html#bqskit.ir.ComposedGate): A gate that is composed of other gates. This provides methods to dynamically determine if the gate is differentiable or optimizable via other means. + +## Differentiable Gates + +If you are implementing a parameterized gate, you may want to make it differentiable. By making a gate differentiable, you allow it to be used by out instantiation engine. In turn, this allows synthesis and other algorithms to work more easily with these gates. To do this, you will need to additionally subclass [`DifferentiableUnitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.html) and implement the [`get_grad`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.get_grad.html#bqskit.qis.DifferentiableUnitary.get_grad) method. `ConstantGate`s are trivially differentiable, as they have no parameters. + +Most of the time, the [`get_unitary_and_grad`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.get_unitary_and_grad.html#bqskit.qis.DifferentiableUnitary.get_unitary_and_grad) method is called by other parts of BQSKit, since both the unitary and gradient are typically needed at the same time. For most gates, computing them at the same time can allow for greater efficiency, since the unitary and gradient can share some computations. 
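As a rough sketch (reusing the `MyGate` example from earlier together with the `get_grad` implementation shown next), an override of `get_unitary_and_grad` that computes the shared phase factors only once could look like this inside the class body:

```python
    def get_unitary_and_grad(
        self, params: RealVector,
    ) -> tuple[UnitaryMatrix, npt.NDArray[np.complex128]]:
        self.check_parameters(params)
        theta = params[0]
        # Both the unitary and its gradient reuse these two phase factors.
        pos = cmath.exp(1j * theta / 2)
        neg = cmath.exp(-1j * theta / 2)
        utry = UnitaryMatrix([[pos, 0], [0, neg]])
        grad = np.array([[[1j / 2 * pos, 0], [0, -1j / 2 * neg]]])
        return utry, grad
```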
+ +Let's make `MyGate` differentiable: + +```python +import cmath +from bqskit.ir.gate import Gate +from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix +from bqskit.qis.unitary.unitary import RealVector +from bqskit.qis.unitary.differentiableunitary import DifferentiableUnitary + +class MyGate(Gate, DifferentiableUnitary): + _num_qudits = 1 + _num_params = 1 + _radixes = (2,) + _name = 'MyGate' + _qasm_name = 'mygate' + + def get_unitary(self, params: RealVector) -> UnitaryMatrix: + self.check_parameters(params) + theta = params[0] + return UnitaryMatrix( + [ + [cmath.exp(1j * theta / 2), 0], + [0, cmath.exp(-1j * theta / 2)], + ], + ) + + def get_grad(self, params: RealVector) -> npt.NDArray[np.complex128]: + self.check_parameters(params) + theta = params[0] + return np.array( + [ + [ + [1j / 2 * cmath.exp(1j * theta / 2), 0], + [0, -1j / 2 * cmath.exp(-1j * theta / 2)], + ], + ], + ) +``` + +The `get_grad` method should return a 3D array, where the first index is the parameter index. `get_grad(params)[i]` should return the gradient of the unitary with respect to the `i`-th parameter. The gradient should be a matrix of the same shape as the unitary matrix, where each element is the derivative of the unitary matrix element with respect to the parameter. diff --git a/docs/index.rst b/docs/index.rst index d1c52a3c0..fc6a0c22f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -25,8 +25,9 @@ our `tutorial series. `_ :caption: Guides :maxdepth: 1 - guides/distributing.md + guides/customgate.md guides/custompass.md + guides/distributing.md .. toctree:: :caption: API Reference From e9dbe4bbcefb8c61fcd29d23651c27039b1146a3 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 12 Sep 2024 09:45:18 -0400 Subject: [PATCH 188/197] Update --- docs/guides/customgate.md | 46 +++++++++++++++++++++++++------------ docs/guides/distributing.md | 2 +- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/docs/guides/customgate.md b/docs/guides/customgate.md index 203f0154d..515a52836 100644 --- a/docs/guides/customgate.md +++ b/docs/guides/customgate.md @@ -1,4 +1,4 @@ -# Implementing a Custom Gate +# Implement a Custom Gate BQSKit's claims great portability, and as such, most algorithms in BQSKit can work natively with any gate set. We have included many commonly used gates @@ -8,7 +8,7 @@ we will implement a custom gate in BQSKit. Since BQSKit's algorithms are built on numerical instantiation, this process is usually as simple as defining a new subclass with a unitary at a high-level. -For example, let's look at the `TGate` definition in BQSKit: +For example, let's look at the [`TGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.TGate.html#bqskit.ir.gates.TGate) definition in BQSKit: ```python ... @@ -24,10 +24,10 @@ class TGate(ConstantGate, QubitGate): ``` A gate is defined by subclassing [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate), -however, there are some abstract subclasses that can extended instead to simplify the process. For example, the `TGate` is a subclass of -[`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate) and -[`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate). The `ConstantGate` -subclass is used for gates that have a fixed unitary matrix, and the `QubitGate` subclass is used for gates that act only on qubits -- rather than qudits. 
In the following sections, the process of defining a custom gate will be explained in more detail. +however, there are some abstract subclasses that can be extended instead to simplify the process. For example, the [`TGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.TGate.html#bqskit.ir.gates.TGate) is a subclass of +[`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ConstantGate.html#bqskit.ir.gates.ConstantGate) and +[`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QubitGate.html#bqskit.ir.gates.QubitGate). The [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ConstantGate.html#bqskit.ir.gates.ConstantGate) +subclass is used for gates that have a fixed unitary matrix, and the [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QubitGate.html#bqskit.ir.gates.QubitGate) subclass is used for gates that act only on qubits -- rather than qudits. In the following sections, the process of defining a custom gate will be explained in more detail. ## Defining a Custom Gate @@ -35,11 +35,11 @@ To define a custom gate, you need to subclass [`Gate`](https://bqskit.readthedoc define all the required attributes. These attributes can be defined as instance variables, class variables, or through methods. The following attributes are required: -- `_num_qudits`: The number of qudits the gate acts on. -- `_num_params`: The number of parameters the gate takes. -- `_radixes`: The radixes of the qudits this gate acts on. This is a list of integers, where each integer is the radix of the corresponding qudit. For example, `[2, 2]` would be a 2-qubit gate, `[3, 3]` would be a 2-qutrit gate, and `[2, 3, 3]` would be a gate that acts on a qubit and two qutrits. -- `_name`: The name of the gate. This used during print operations. -- `_qasm_name`: The name of the gate in QASM. (Qubit only gates, should use lowercase, optional) +- [`_num_qudits`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.num_qudits.html#bqskit.ir.Gate.num_qudits): The number of qudits the gate acts on. +- [`_num_params`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.num_params.html#bqskit.ir.Gate.num_params): The number of parameters the gate takes. +- [`_radixes`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.radixes.html#bqskit.ir.Gate.radixes): The radixes of the qudits this gate acts on. This is a tuple of integers, where each integer is the radix of the corresponding qudit. For example, `(2, 2)` would be a 2-qubit gate, `(3, 3)` would be a 2-qutrit gate, and `(2, 3, 3)` would be a gate that acts on a qubit and two qutrits. +- [`_name`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.name.html#bqskit.ir.Gate.name): The name of the gate. This used during print operations. +- [`_qasm_name`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.qasm_name.html#bqskit.ir.Gate.qasm_name): The name of the gate in QASM. (Qubit only gates, should use lowercase, optional) Additionally, you will need to override the abstract method [`get_unitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.get_unitary.html#bqskit.qis.Unitary.get_unitary). This method maps the parameters of the gate to a unitary matrix. 
@@ -68,7 +68,7 @@ class MyGate(Gate): ) ``` -Note that the `params` argument is a [`RealVector`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.RealVector.html#bqskit.qis.RealVector) object, which is an alias for many types of float arrays. There is a helper method in the `Gate` class hierarchy called [`check_parameters`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.check_parameters.html#bqskit.qis.Unitary.check_parameters) that can be used to validate the parameters before using them. This will check for the correct types and lengths of the parameters: +Note that the `params` argument is a [`RealVector`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.RealVector.html#bqskit.qis.RealVector) object, which is an alias for many types of float arrays. There is a helper method in the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class hierarchy called [`check_parameters`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.check_parameters.html#bqskit.qis.Unitary.check_parameters) that can be used to validate the parameters before using them. This will check for the correct types and lengths of the parameters: ```python ... @@ -80,7 +80,7 @@ Note that the `params` argument is a [`RealVector`](https://bqskit.readthedocs.i ) ``` -As mentioned previously, the required attributes can be defined as class variables, like in the above example, or as instance variables. The following example shows how to define the same gate with instance variables: +As mentioned previously, the required attributes can be defined as class variables, like in the above example, or as instance variables. The following example shows how to define a tensor product of an arbitrary number of `MyGate`s using instance variables: ```python import cmath @@ -88,12 +88,12 @@ from bqskit.ir.gate import Gate from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix from bqskit.qis.unitary.unitary import RealVector -class MyGate(Gate): +class MyGateTensor(Gate): def __init__(self, num_qudits: int) -> None: self._num_qudits = num_qudits self._num_params = 1 self._radixes = tuple([2] * num_qudits) - self._name = 'MyGate' + self._name = f'MyGateTensor{num_qudits}' def get_unitary(self, params: RealVector) -> UnitaryMatrix: self.check_parameters(params) @@ -105,10 +105,26 @@ class MyGate(Gate): ], ) base.otimes(*[base] * (self._num_qudits - 1)) # base tensor product with itself + # Note: Since the unitary is diagonal, there are more efficient ways to + # compute the tensor product, but this is a simple example meant + # to demonstrate the concept. In general, you should always implement + # the most efficient method for your gate. ``` This style is helpful when the gate's attributes are dependent on the constructor arguments. The `get_unitary` method should be implemented in the same way as before. +The last way to define the attributes is through methods. The corresponding property names can be found on the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class. The following example computers the gate name of `MyGateTensor` through the `name` property: + +```python +... +class MyGateTensor(Gate): + ... 
# __init__ and get_unitary methods same as before without _name attribute + + @property + def name(self) -> str: + return f'MyGateTensor{self._num_qudits}' + +``` ## Utilizing Helper Classes diff --git a/docs/guides/distributing.md b/docs/guides/distributing.md index 38966bd15..c79c34e72 100644 --- a/docs/guides/distributing.md +++ b/docs/guides/distributing.md @@ -1,4 +1,4 @@ -# Distributing BQSKit Across a Cluster +# Distribute BQSKit Across a Cluster This guide describes how to launch a BQSKit Runtime Server in detached mode on one or more computers, connect to it, and perform compilations on the server. From 639056efc4c1580da84ad75d9ff26c5d9355bd87 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 12 Sep 2024 09:51:22 -0400 Subject: [PATCH 189/197] Added qasm section --- docs/guides/customgate.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/guides/customgate.md b/docs/guides/customgate.md index 515a52836..ec1f9880b 100644 --- a/docs/guides/customgate.md +++ b/docs/guides/customgate.md @@ -182,3 +182,7 @@ class MyGate(Gate, DifferentiableUnitary): ``` The `get_grad` method should return a 3D array, where the first index is the parameter index. `get_grad(params)[i]` should return the gradient of the unitary with respect to the `i`-th parameter. The gradient should be a matrix of the same shape as the unitary matrix, where each element is the derivative of the unitary matrix element with respect to the parameter. + +## Working with QASM + +If you want to use your gate in QASM, you will need to define the `_qasm_name` attribute. This is the name of the gate in QASM. However, some gates require special qasm definitions to be included at the top of a qasm file. This can be achieved by defining the [`get_qasm_gate_def`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.get_qasm_gate_def.html#bqskit.ir.Gate.get_qasm_gate_def) method. This method returns a string, which will be included as-is at the top of every qasm file that uses the gate. From cc6af7c3b28b7199238802e7afc42254ab6e7f9d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 12 Sep 2024 10:24:32 -0400 Subject: [PATCH 190/197] Fixed some links --- docs/guides/customgate.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/guides/customgate.md b/docs/guides/customgate.md index ec1f9880b..dff66287f 100644 --- a/docs/guides/customgate.md +++ b/docs/guides/customgate.md @@ -38,7 +38,7 @@ attributes are required: - [`_num_qudits`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.num_qudits.html#bqskit.ir.Gate.num_qudits): The number of qudits the gate acts on. - [`_num_params`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.num_params.html#bqskit.ir.Gate.num_params): The number of parameters the gate takes. - [`_radixes`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.radixes.html#bqskit.ir.Gate.radixes): The radixes of the qudits this gate acts on. This is a tuple of integers, where each integer is the radix of the corresponding qudit. For example, `(2, 2)` would be a 2-qubit gate, `(3, 3)` would be a 2-qutrit gate, and `(2, 3, 3)` would be a gate that acts on a qubit and two qutrits. -- [`_name`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.name.html#bqskit.ir.Gate.name): The name of the gate. This used during print operations. +- [`_name`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.name.html#bqskit.ir.Gate.name): The name of the gate. 
This is used during print operations. - [`_qasm_name`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.qasm_name.html#bqskit.ir.Gate.qasm_name): The name of the gate in QASM. (Qubit only gates, should use lowercase, optional) Additionally, you will need to override the abstract method [`get_unitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.get_unitary.html#bqskit.qis.Unitary.get_unitary). This method maps the parameters of the gate to a unitary matrix. @@ -68,7 +68,7 @@ class MyGate(Gate): ) ``` -Note that the `params` argument is a [`RealVector`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.RealVector.html#bqskit.qis.RealVector) object, which is an alias for many types of float arrays. There is a helper method in the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class hierarchy called [`check_parameters`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.check_parameters.html#bqskit.qis.Unitary.check_parameters) that can be used to validate the parameters before using them. This will check for the correct types and lengths of the parameters: +Note the `params` argument is a [`RealVector`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.RealVector.html#bqskit.qis.RealVector) object, which is an alias for many types of float arrays. There is a helper method in the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class hierarchy called [`check_parameters`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.Unitary.check_parameters.html#bqskit.qis.Unitary.check_parameters) that can be used to validate the parameters before using them. This will check for the correct types and lengths of the parameters: ```python ... @@ -111,9 +111,9 @@ class MyGateTensor(Gate): # the most efficient method for your gate. ``` -This style is helpful when the gate's attributes are dependent on the constructor arguments. The `get_unitary` method should be implemented in the same way as before. +This style is helpful when the gate's attributes are dependent on the constructor arguments. -The last way to define the attributes is through methods. The corresponding property names can be found on the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class. The following example computers the gate name of `MyGateTensor` through the `name` property: +The last way to define the attributes is through methods. The corresponding property names can be found on the [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate) class. The following example computes the gate name of `MyGateTensor` through the `name` property: ```python ... @@ -128,13 +128,13 @@ class MyGateTensor(Gate): ## Utilizing Helper Classes -BQSKit provides some helper classes to simplify the process of defining gates. In the first example of this guide, we used the `ConstantGate` and `QubitGate` helper classes. To use these helper subclasses, we will subclass them instead of `Gate`. The following are the current helper classes available: +BQSKit provides some helper classes to simplify the process of defining gates. 
In the first example of this guide, we used the [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate) and [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate) helper classes. To use these helper subclasses, we will subclass them instead of [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate). The following are the available helper classes: -- [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate): A gate that has a fixed unitary matrix with no parameters. This will automatically set `_num_params` to 0, and swap the `get_unitary` method for a `_utry` variable. Additionally, these gates are made to be differentiable trivially. -- [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate): A gate that acts only on qubits. This defines `_radixes` to be all `2`s. -- [`QutritGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QutritGate.html#bqskit.ir.QutritGate): A gate that acts on qutrits. This defines `_radixes` to be all `3`s. -- [`QuditGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QuditGate.html#bqskit.ir.QuditGate): A gate that acts on qudits of the same radix. This swaps the `_radixes` requirement for a `_radix` requirement. This is useful for gates that act on qudits of the same radix, but not necessarily only qubits or qutrits. -- [`ComposedGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ComposedGate.html#bqskit.ir.ComposedGate): A gate that is composed of other gates. This provides methods to dynamically determine if the gate is differentiable or optimizable via other means. +- [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ConstantGate.html#bqskit.ir.gates.ConstantGate): A gate that has a fixed unitary matrix with no parameters. This will automatically set `_num_params` to 0, and swap the `get_unitary` method for a `_utry` attribute. Additionally, these gates have the trivial differentiable implementations provided. +- [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QubitGate.html#bqskit.ir.gates.QubitGate): A gate that acts only on qubits. This defines `_radixes` to be all `2`s. +- [`QutritGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QutritGate.html#bqskit.ir.gates.QutritGate): A gate that acts on qutrits. This defines `_radixes` to be all `3`s. +- [`QuditGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QuditGate.html#bqskit.ir.gates.QuditGate): A gate that acts on qudits of the same radix. This swaps the `_radixes` requirement for a required `_radix` attribute. This is useful for gates that act on qudits of the same radix, but not necessarily only qubits or qutrits. +- [`ComposedGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ComposedGate.html#bqskit.ir.gates.ComposedGate): A gate that is composed of other gates. This provides methods to dynamically determine if the gate is differentiable or optimizable via other means. 
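For instance, a fixed single-qutrit shift gate built on these helpers needs little more than its unitary (a rough sketch for illustration; `MyShiftGate` is a made-up name, not a gate shipped with BQSKit):

```python
from bqskit.ir.gates import ConstantGate
from bqskit.ir.gates import QutritGate
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix

class MyShiftGate(ConstantGate, QutritGate):
    _num_qudits = 1
    _name = 'MyShiftGate'
    _utry = UnitaryMatrix(
        [
            [0, 0, 1],  # cyclic shift: |0> -> |1> -> |2> -> |0>
            [1, 0, 0],
            [0, 1, 0],
        ],
        [3],  # a single qutrit
    )
```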
## Differentiable Gates From 1bafb03527ea77c1e99ae23c45532a9b02a92600 Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Thu, 12 Sep 2024 11:49:32 -0400 Subject: [PATCH 191/197] Update --- docs/guides/customgate.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/guides/customgate.md b/docs/guides/customgate.md index dff66287f..eca66e8cd 100644 --- a/docs/guides/customgate.md +++ b/docs/guides/customgate.md @@ -128,17 +128,17 @@ class MyGateTensor(Gate): ## Utilizing Helper Classes -BQSKit provides some helper classes to simplify the process of defining gates. In the first example of this guide, we used the [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.ConstantGate.html#bqskit.ir.ConstantGate) and [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.QubitGate.html#bqskit.ir.QubitGate) helper classes. To use these helper subclasses, we will subclass them instead of [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate). The following are the available helper classes: +BQSKit provides some helper classes to simplify the process of defining gates. In the first example of this guide, we used the [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ConstantGate.html#bqskit.ir.gates.ConstantGate) and [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QubitGate.html#bqskit.ir.gates.QubitGate) helper classes. To use these helper subclasses, we will subclass them instead of [`Gate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.Gate.html#bqskit.ir.Gate). The following are the available helper classes: - [`ConstantGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ConstantGate.html#bqskit.ir.gates.ConstantGate): A gate that has a fixed unitary matrix with no parameters. This will automatically set `_num_params` to 0, and swap the `get_unitary` method for a `_utry` attribute. Additionally, these gates have the trivial differentiable implementations provided. - [`QubitGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QubitGate.html#bqskit.ir.gates.QubitGate): A gate that acts only on qubits. This defines `_radixes` to be all `2`s. -- [`QutritGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QutritGate.html#bqskit.ir.gates.QutritGate): A gate that acts on qutrits. This defines `_radixes` to be all `3`s. +- [`QutritGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QutritGate.html#bqskit.ir.gates.QutritGate): A gate that acts only on qutrits. This defines `_radixes` to be all `3`s. - [`QuditGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.QuditGate.html#bqskit.ir.gates.QuditGate): A gate that acts on qudits of the same radix. This swaps the `_radixes` requirement for a required `_radix` attribute. This is useful for gates that act on qudits of the same radix, but not necessarily only qubits or qutrits. - [`ComposedGate`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.ir.gates.ComposedGate.html#bqskit.ir.gates.ComposedGate): A gate that is composed of other gates. This provides methods to dynamically determine if the gate is differentiable or optimizable via other means. ## Differentiable Gates -If you are implementing a parameterized gate, you may want to make it differentiable. 
By making a gate differentiable, you allow it to be used by out instantiation engine. In turn, this allows synthesis and other algorithms to work more easily with these gates. To do this, you will need to additionally subclass [`DifferentiableUnitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.html) and implement the [`get_grad`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.get_grad.html#bqskit.qis.DifferentiableUnitary.get_grad) method. `ConstantGate`s are trivially differentiable, as they have no parameters. +If you are implementing a parameterized gate, you may want to make it differentiable. By making a gate differentiable, you allow it to be used by BQSKit's instantiation engine. In turn, this allows synthesis and other algorithms to work more easily with these gates. To do this, you will need to additionally subclass [`DifferentiableUnitary`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.html) and implement the [`get_grad`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.get_grad.html#bqskit.qis.DifferentiableUnitary.get_grad) method. `ConstantGate`s are trivially differentiable, as they have no parameters. Most of the time, the [`get_unitary_and_grad`](https://bqskit.readthedocs.io/en/latest/source/autogen/bqskit.qis.DifferentiableUnitary.get_unitary_and_grad.html#bqskit.qis.DifferentiableUnitary.get_unitary_and_grad) method is called by other parts of BQSKit, since both the unitary and gradient are typically needed at the same time. For most gates, computing them at the same time can allow for greater efficiency, since the unitary and gradient can share some computations. From 592b7b48faf50234d8a66fd6b759df3857732098 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 12 Sep 2024 22:30:41 +0300 Subject: [PATCH 192/197] Added paper references to docstrings --- bqskit/passes/search/generators/discrete.py | 3 ++- bqskit/utils/math.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index daeffe374..248c7df98 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -26,7 +26,8 @@ class DiscreteLayerGenerator(LayerGenerator): """ The DiscreteLayerGenerator class. - Expands circuits using only discrete gates. + Expands circuits using only discrete gates. This is a non-reinforcement + learning version of diagonalizing in https://arxiv.org/abs/2409.00433. """ def __init__( diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index 53cd5cddb..c2d786b03 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -298,6 +298,9 @@ def diagonal_distance(unitary: npt.NDArray[np.complex128]) -> float: imately inverted by a diagonal unitary. A unitary is approximately inverted when the Hilbert-Schmidt distance to the identity is less than some threshold. + + A proof of correctness can be found in the appendix of: + https://arxiv.org/abs/2409.00433 Args: unitary (np.ndarray): The unitary matrix to check. 
From aee7292dd29e2c70d7449f1f33afa0d5cc16d8c2 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 12 Sep 2024 22:38:44 +0300 Subject: [PATCH 193/197] Diagonalization is default double_headed action but now can be parameterized to others --- bqskit/passes/search/generators/discrete.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 248c7df98..16bc18709 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -34,6 +34,7 @@ def __init__( self, gateset: Sequence[Gate] = [HGate(), TGate(), CNOTGate()], double_headed: bool = False, + dividing_gate_type = PauliZGate, ) -> None: """ Construct a DiscreteLayerGenerator. @@ -46,6 +47,11 @@ def __init__( double_headed (bool): If True, successors will be generated by both appending and prepending gates. This lets unitaries be diagonalized instead of inverted. (Default: False) + + dividing_gate_type (Callable[[int], Gate]): A gate that goes between + the two heads of the discrete searches. If double_headed is + False, this gate simply goes at the beginning of the circuit. + (Default: PauliZGate) Raises: ValueError: If the gateset is not a sequence. @@ -71,6 +77,7 @@ def __init__( raise ValueError(m) self.gateset = gateset self.double_headed = double_headed + self.dividing_gate_type = dividing_gate_type def gen_initial_layer( self, @@ -99,7 +106,7 @@ def gen_initial_layer( if self.double_headed: n = target.num_qudits span = list(range(n)) - init_circuit.append_gate(PauliZGate(n), span) + init_circuit.append_gate(self.dividing_gate_type(n), span) return init_circuit From 61952ff0e87338e7f0f5f43ba6e41577efb3bad8 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 12 Sep 2024 22:40:14 +0300 Subject: [PATCH 194/197] Type annotation added --- bqskit/passes/search/generators/discrete.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 16bc18709..da201ee5c 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -2,6 +2,7 @@ from __future__ import annotations import logging +from typing import Callable from typing import Sequence from bqskit.compiler.passdata import PassData @@ -34,7 +35,7 @@ def __init__( self, gateset: Sequence[Gate] = [HGate(), TGate(), CNOTGate()], double_headed: bool = False, - dividing_gate_type = PauliZGate, + dividing_gate_type: Callable[[int], Gate] = PauliZGate, ) -> None: """ Construct a DiscreteLayerGenerator. From 5274da0752341afeebac6a6a6c842fa593ca6c17 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 12 Sep 2024 22:40:37 +0300 Subject: [PATCH 195/197] Added TODO --- bqskit/passes/search/generators/discrete.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index da201ee5c..22c13909a 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -60,6 +60,9 @@ def __init__( ValueError: If the gateset contains a parameterized gate. ValueError: If the radices of gates are different. + + TODO: + Check universality of gateset. """ if not is_sequence(gateset): m = f'Expected sequence of gates, got {type(gateset)}.'
From 0e93ea7328c164a968ecef0f8de684a2f1201ca8 Mon Sep 17 00:00:00 2001 From: Mathias Weiden Date: Thu, 12 Sep 2024 22:55:28 +0300 Subject: [PATCH 196/197] Pre-commit --- bqskit/passes/search/generators/discrete.py | 9 +++++---- bqskit/utils/math.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/bqskit/passes/search/generators/discrete.py b/bqskit/passes/search/generators/discrete.py index 22c13909a..33ae4d0d9 100644 --- a/bqskit/passes/search/generators/discrete.py +++ b/bqskit/passes/search/generators/discrete.py @@ -28,7 +28,8 @@ class DiscreteLayerGenerator(LayerGenerator): The DiscreteLayerGenerator class. Expands circuits using only discrete gates. This is a non-reinforcement - learning version of diagonalizing in https://arxiv.org/abs/2409.00433. + learning version of diagonalizing in + https://arxiv.org/abs/2409.00433. """ def __init__( @@ -48,9 +49,9 @@ def __init__( double_headed (bool): If True, successors will be generated by both appending and prepending gates. This lets unitaries be diagonalized instead of inverted. (Default: False) - + dividing_gate_type (Callable[[int], Gate]): A gate that goes between - the two heads of the discrete searches. If double_headed is + the two heads of the discrete searches. If double_headed is False, this gate simply goes at the beginning of the circuit. (Default: PauliZGate) @@ -60,7 +61,7 @@ def __init__( ValueError: If the gateset contains a parameterized gate. ValueError: If the radices of gates are different. - + TODO: Check universality of gateset. """ diff --git a/bqskit/utils/math.py b/bqskit/utils/math.py index c2d786b03..f2560044d 100644 --- a/bqskit/utils/math.py +++ b/bqskit/utils/math.py @@ -298,7 +298,7 @@ def diagonal_distance(unitary: npt.NDArray[np.complex128]) -> float: imately inverted by a diagonal unitary. A unitary is approximately inverted when the Hilbert-Schmidt distance to the identity is less than some threshold. - + A proof of correctness can be found in the appendix of: https://arxiv.org/abs/2409.00433 From 992473ff52bf673c0d8e7e6cbced85892a417d7d Mon Sep 17 00:00:00 2001 From: Ed Younis Date: Fri, 13 Sep 2024 14:27:48 -0400 Subject: [PATCH 197/197] 1.2.0 --- bqskit/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bqskit/_version.py b/bqskit/_version.py index b035796df..726c5cd4b 100644 --- a/bqskit/_version.py +++ b/bqskit/_version.py @@ -1,4 +1,4 @@ """This module contains the version information for BQSKit.""" from __future__ import annotations -__version_info__ = ('1', '1', '2') +__version_info__ = ('1', '2', '0') __version__ = '.'.join(__version_info__[:3]) + ''.join(__version_info__[3:])