
E. Usage examples


The software is provided with some usage examples in the form of Jupyter notebooks, which can be found in brainspy-examples. Below, a general explanation of how to create a custom model is provided.

Creating a custom model with a single DNPU layer

This library is mainly intended to be used as an extension of PyTorch, where you can create custom dopant-network based circuit designs, simulate them, and find adequate control voltages for a particular task. The library also supports testing whether this behaviour matches that of real hardware dopant-network devices. Custom models are expected to be instances of torch.nn.Module. An example of a custom model with a single DNPU is presented below. Note that this example is for a single DNPU module; if more were required, some of the functions that need to be implemented would change.


import torch

from brainspy.processors.dnpu import DNPU
from brainspy.processors.processor import Processor
from brainspy.utils.pytorch import TorchUtils


class SingleDNPUCustomModel(torch.nn.Module):
    def __init__(self, configs):
        super(SingleDNPUCustomModel, self).__init__()
        self.gamma = 1
        self.node_no = 1
        model_data = torch.load(configs['model_dir'],
                                map_location=TorchUtils.get_device())
        processor = Processor(configs, model_data['info'],
                              model_data['model_state_dict'])
        self.dnpu = DNPU(processor=processor,
                         data_input_indices=[[2, 3]] * self.node_no,
                         forward_pass_type='vec')
        # Remember to add an input transformation, if required.
        # In this case, the example assumes that input data will be in the range [0, 1].
        self.dnpu.add_input_transform([0, 1])

    def forward(self, x):
        x = self.dnpu(x)
        return x

    # If you want to swap from simulation to hardware, or vice versa, you need these functions
    def hw_eval(self, configs, info=None):
        self.eval()
        self.dnpu.hw_eval(configs, info)

    def sw_train(self, configs, info=None, model_state_dict=None):
        self.train()
        self.dnpu.sw_train(configs, info, model_state_dict)

    ##########################################################################################

    # If you want to get information about the voltage ranges from outside the model, add the following functions.
    def get_input_ranges(self):
        return self.dnpu.get_input_ranges()

    def get_control_ranges(self):
        return self.dnpu.get_control_ranges()

    def get_control_voltages(self):
        return self.dnpu.get_control_voltages()

    def set_control_voltages(self, control_voltages):
        self.dnpu.set_control_voltages(control_voltages)

    def get_clipping_value(self):
        return self.dnpu.get_clipping_value()

    # To keep control voltages within their ranges, implement the following functions (only those you plan to use)
    def regularizer(self):
        return self.gamma * (self.dnpu.regularizer())

    def constraint_weights(self):
        self.dnpu.constraint_control_voltages()

    # To produce targets with the same size as the outputs when using hardware validation
    def format_targets(self, x: torch.Tensor) -> torch.Tensor:
        return self.dnpu.format_targets(x)

    ######################################################################################################################################################

    # If you want to implement an on-chip GA, you need these functions
    def is_hardware(self):
        return self.dnpu.processor.is_hardware()

    def close(self):
        self.dnpu.close()
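
The model above can then be used like any other torch.nn.Module instance. Below is a minimal usage sketch; the contents of the configs dictionary and the path to the trained surrogate model are assumptions that depend on your own setup (a full configuration example is given at the end of this page).

import torch

# Hypothetical configuration; see the YAML example at the bottom of this
# page for the full set of processor keys.
configs = {
    'model_dir': 'training_data.pt',  # assumed path to a trained surrogate model
    'processor_type': 'simulation',
    # ... remaining processor configuration entries
}

model = SingleDNPUCustomModel(configs)

# Random inputs in the declared range [0, 1], one column per data input
# electrode (two in this example: indices 2 and 3)
x = torch.rand((128, 2))
output = model(x)

# To validate on hardware later, one could call (hardware configs assumed):
# model.hw_eval(hw_configs)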

Creating a custom model with multiple DNPU layers

import torch

from brainspy.processors.dnpu import DNPU
from brainspy.processors.modules.bn import DNPUBatchNorm
from brainspy.processors.processor import Processor
from brainspy.utils.pytorch import TorchUtils

class MultipleDNPUCustomModel(torch.nn.Module):
    def __init__(self, configs):
        super(MultipleDNPUCustomModel, self).__init__()
        self.gamma = 1
        self.node_no_l1 = 2
        self.node_no_l2 = 1
        model_data = torch.load(configs['model_dir'],
                                map_location=TorchUtils.get_device())
        processor = Processor(configs, model_data['info'],
                              model_data['model_state_dict'])
        self.dnpu1 = DNPUBatchNorm(processor=processor,
                                   data_input_indices=[[2, 3]] * self.node_no_l1,
                                   forward_pass_type='vec')
        self.dnpu2 = DNPU(processor=processor,
                          data_input_indices=[[2, 3]] * self.node_no_l2,
                          forward_pass_type='vec')
        # Remember to add an input transformation, if required.
        # Here, dnpu1 assumes raw input data in the range [-1, 1], while dnpu2
        # receives sigmoid outputs, which lie in the range [0, 1].
        self.dnpu1.add_input_transform([-1, 1])
        self.dnpu2.add_input_transform([0, 1])

    def forward(self, x):
        x = torch.cat((x, x), dim=1)
        x = torch.sigmoid(self.dnpu1(x))
        x = self.dnpu2(x)
        return x

    # If you want to swap from simulation to hardware, or vice versa, you need these functions
    def hw_eval(self, configs, info=None):
        self.eval()
        self.dnpu1.hw_eval(configs, info)
        self.dnpu2.hw_eval(configs, info)

    def sw_train(self, configs, info=None, model_state_dict=None):
        self.train()
        self.dnpu1.sw_train(configs, info, model_state_dict)
        self.dnpu2.sw_train(configs, info, model_state_dict)

    ##########################################################################################

    # If you want to get information about the voltage ranges from outside the model, add the following functions.
    def get_input_ranges(self):
        return torch.cat(
            (self.dnpu1.get_input_ranges(), self.dnpu2.get_input_ranges()))

    def get_control_ranges(self):
        return torch.cat(
            (self.dnpu1.get_control_ranges(), self.dnpu2.get_control_ranges()))

    def get_control_voltages(self):
        return torch.cat((self.dnpu1.get_control_voltages(),
                          self.dnpu2.get_control_voltages()))

    def set_control_voltages(self, control_voltages):
        self.dnpu1.set_control_voltages(control_voltages[:2])
        self.dnpu2.set_control_voltages(control_voltages[-1].unsqueeze(0))

    def get_clipping_value(self):
        # Both DNPU layers share the same processor, so one clipping value suffices
        return self.dnpu1.get_clipping_value()

    # To keep control voltages within their ranges, implement the following functions (only those you plan to use)
    def regularizer(self):
        return self.gamma * (self.dnpu1.regularizer() +
                             self.dnpu2.regularizer())

    def constraint_weights(self):
        self.dnpu1.constraint_control_voltages()
        self.dnpu2.constraint_control_voltages()

    # To produce targets with the same size as the outputs when using hardware validation
    def format_targets(self, x: torch.Tensor) -> torch.Tensor:
        return self.dnpu2.format_targets(self.dnpu1.format_targets(x))

    ######################################################################################################################################################

    # If you want to implement an on-chip GA, you need these functions
    def is_hardware(self):
        return (self.dnpu1.processor.is_hardware()
                or self.dnpu2.processor.is_hardware())

    def close(self):
        self.dnpu1.close()
        self.dnpu2.close()

Creating a custom logger

By default, the custom algorithms included in brains-py allow you to add a custom logger, in order to gather and study further what is going on during the training process. Below, a simple implementation with a TensorBoard logger is introduced, which enables logging the cost function. Your custom logger is required to implement each of the functions provided in the example. For more information about TensorBoard in PyTorch, visit the official tutorial.



from torch.utils.tensorboard import SummaryWriter


class Logger:
    def __init__(self, log_dir, comment="DEFAULT_LOGGER"):
        # TODO: LOG HYPERPARAMETERS IN THE COMMENT e.g. "LR_0.1_BATCH_16"
        self.log = SummaryWriter(log_dir, comment=comment)
        self.gate = ""

    def log_train_inputs(self, inputs, targets):
        # self.log.add_graph(net, images)
        pass

    def log_train_predictions(self, predictions):
        pass

    def log_ios_train(self, inputs, targets, predictions, epoch):
        pass

    def log_val_predictions(self, inputs, targets):
        pass

    def log_performance(self, train_losses, val_losses, epoch):
        if not val_losses:
            self.log.add_scalar("Cost/train/" + self.gate, train_losses[-1],
                                epoch)
        else:
            self.log.add_scalars(
                "Cost/" + self.gate,
                {
                    "train": train_losses[-1],
                    "dev": val_losses[-1]
                },
                epoch,
            )

    def log_outputs(self, outputs):
        pass

    def close(self):
        self.log.close()
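
A sketch of how this logger could be used; the log directory, gate name, and loss values below are placeholders.

logger = Logger('runs/example')  # hypothetical log directory
logger.gate = 'ring'  # used as part of the TensorBoard scalar tag

train_losses, val_losses = [], []
for epoch in range(10):
    train_losses.append(1.0 / (epoch + 1))  # placeholder loss values
    val_losses.append(1.2 / (epoch + 1))
    logger.log_performance(train_losses, val_losses, epoch)
logger.close()

The resulting curves can then be inspected with tensorboard --logdir runs/example.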

Example: Running the ring classifier with the multiple DNPU model (using the example at brainspy-tasks)

    import datetime as d
    from bspytasks.ring.tasks.searcher import search_solution

    from brainspy.utils import manager
    from brainspy.utils.io import load_configs
    from ring_logger import Logger
    # Assumes the MultipleDNPUCustomModel defined above is available in a local model.py
    import model

    import matplotlib
    matplotlib.use('Agg')

    # Load configurations
    configs = load_configs(
        '/home/unai/Documents/3-Programming/bspy/examples-multiple-devices/configs.yaml'
    )

    criterion = manager.get_criterion(configs["algorithm"]['criterion'])
    algorithm = manager.get_algorithm(configs["algorithm"]['type'])

    logger = Logger("." + str(d.datetime.now().timestamp()))

    search_solution(configs,
                    model.MultipleDNPUCustomModel,
                    criterion,
                    algorithm,
                    custom_logger=logger)  # keyword name assumed; check search_solution's signature
Where configs.yaml is:

results_dir: "tmp/TEST/output/ring/exp"
runs: 3
start_gap: 0.4
stop_gap: 0.00825
data:
  gap: 0.5
  load: false # If load is false, a new dataset is generated. If load is a path to existing data, that data is loaded instead
  sample_no: 2000
  batch_size: 128
  worker_no: 0
  pin_memory: True
  split_percentages: # The data is divided into training, validation and test datasets respectively
    - 0.8 # Percentage of the data used for training
    - 0.1 # Percentage of the data used for validation
    - 0.1 # Percentage of the data used for test
algorithm: 
    type: "gradient"
    epochs: 250
    learning_rate: 0.001
    criterion: "fisher"
    optimizer: "adam"
    constraint_control_voltages: "clip"
processor: 
    processor_type: "simulation" # Possible values are: simulation, simulation_debug, cdaq_to_cdaq, and cdaq_to_nidaq
    model_dir: "training_data.pt"
    input_indices: # It specifies the indices of the activation data array that will be considered as inputs
      - 1
      - 2
    electrode_effects:
      # amplification: [28.5] # It always has to be a list
      # output_clipping: null
      voltage_ranges:
        [
          [-0.7, 0.3],
          [-1.2, 0.7],
          [-1.2, 0.7],
          [-1.2, 0.7],
          [-1.2, 0.7],
          [-1.2, 0.7],
          [-0.7, 0.3],
        ]
      noise:
          type: gaussian
          variance: 2.07
waveform:
  plateau_length: 10
  slope_length: 30
accuracy: # Configurations for the perceptron
  epochs: 300
  learning_rate: 0.02
  batch_size: 128
  worker_no: 0
  pin_memory: False
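
As a quick sanity check, the configuration above can be loaded and inspected with the same loader used in the running example (the file name is an assumption):

from brainspy.utils.io import load_configs

configs = load_configs('configs.yaml')  # assumed path to the YAML above
print(configs['processor']['processor_type'])  # 'simulation'
print(configs['algorithm']['epochs'])  # 250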