diff --git a/.gitignore b/.gitignore index e29fd08f..d53852fe 100644 --- a/.gitignore +++ b/.gitignore @@ -154,3 +154,4 @@ cython_debug/ .vscode debug +*.drawio diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 67f8991b..54b4331a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: stages: [commit] - repo: https://github.com/psf/black - rev: 24.2.0 + rev: 24.3.0 hooks: - id: black stages: [commit] @@ -40,10 +40,9 @@ repos: args: [--pytest-test-first] - repo: https://github.com/python-poetry/poetry - rev: 1.7.1 + rev: 1.8.0 hooks: - id: poetry-check - - id: poetry-lock - id: poetry-export args: ["-f", "requirements.txt"] - id: poetry-install diff --git a/CHANGELOG.md b/CHANGELOG.md index c14c1141..000cbf94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,35 @@ +## v1.0.0a2 + +- 修复 `paicorelib` 错误的导入与版本约依赖 + ## v1.0.0a3 -- 添加了示例程序,MNIST双输入端口两层全连接网络 +- 添加示例 - 修复工作流版本错误 - 修复神经元膜电位溢出处理错误 - 修复神经元分组的计数错误 + +## v1.0.0a4 + +- 重命名突触连接类型 `ConnType` 为 `SynConnType` ,现在通过 `pb.SynCnnType.x` 调用。例如,`pb.SynCnnType.All2All` + +## v1.0.0a5 + +- 支持无限嵌套深度的网络 +- 支持全展开2D卷积算子构建与部署(`padding` 不支持) +- 修复当 `keep_shape=True` 时,神经元状态变量在运行时尺寸错误 + +## v1.0.0a6 + +- 新增 `Always1Neuron` 神经元,该神经元将在工作期间持续输出1,不得单独存在,需存在前向突触与其连接。 + +## v1.0.0a7 + +- 支持全展开1D卷积算子构建与部署(`padding` 不支持) + +## v1.0.0b1 + +- 提高 `numpy` 依赖版本至 `^1.24.0` +- 修复神经元输入累加错误 +- 修复当权重为 `np.bool_` 且关闭权重精度优化选项( `weight_bit_optimization` )后,仍视为 `np.int8` 的错误 +- 支持混合精度权重的部署 diff --git a/README.md b/README.md index 935fc03c..813c2896 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ - + @@ -24,3 +24,5 @@ PAIBox使用指南:[Guide-of-PAIBox](docs/Guide-of-PAIBox.md) 高效编写测试项目指南:[Guide-of-Test](docs/Guide-of-Test.md) TODO:[TODO List](./TODO.md) + +[Changelog](./CHANGELOG.md) diff --git a/TODO.md b/TODO.md index ce7e69a6..fb288d8f 100644 --- a/TODO.md +++ b/TODO.md @@ -9,11 +9,25 @@ - [x] `tick_wait_start` 与 `tick_wait_end` 前/后端支持 - [x] 网络节点间自定义延迟( `tick_relative` )前后端支持 - [x] 神经元密集分配策略(待验证) -- [ ] 半折叠卷积算子前后端支持 + +- 功能模块支持 + + - [x] 全展开1D卷积 + - [x] 全展开2D卷积 + - [ ] 逻辑算子:与、或、非 + - [ ] 二维矩阵转置 + - [ ] 三维矩阵转置 + - [ ] 全展开1D转置卷积 + - [ ] 全展开2D转置卷积 + - [ ] 脉冲加、减法 + - [ ] 残差结构 + - [ ] 半折叠2D卷积 - 多层次结构网络组件的发现、数据流验证与后端解析 + - [x] 对有层次结构(子网络例化复用等情形)的网络模型,实现在所有层次上网络组件的发现、数据流与后端解析实现 - - [ ] 局部连接包装成单个算子,实现在所有层次上网络组件的发现、数据流与后端解析实现。考虑用户调用接口 + - [ ] 局部连接包装成单个算子,实现在所有层次上网络组件的发现、数据流与后端解析实现 + - 带有约束的路由坐标分配 - [ ] 支持芯片阵列部署 @@ -28,13 +42,15 @@ - [x] 资源+延时/吞吐率兼顾:在资源优先的基础上,可**叠加使用密集分配策略**,对未分配满的物理核尝试分配同一节点的其他神经元段。相较于单纯使用资源优先策略,使用更少的物理核 - [ ] 自适应策略:根据网络结构,对各层 `unrolling factor` 做全局最优化设置,且预留接口以手动约束某些节点 -- [ ] 混合精度权重支持(已添加权重精度优化的**编译选项**) - - 对于网络中存在不同精度的突触情况,需要在后端实现对不同精度突触分组的处理。预期提供一个编译选项: - - 当开启时,当不同精度突触被分在同一组时,将按照最大精度权重进行,当分在不同组时,按各自权重处理 - - 当关闭时,不同精度突触将**不被分在同一组**(即分割CB时存在约束) +- [ ] 混合精度权重部署优化:对于网络中存在不同精度的突触情况,需要在后端实现对不同精度突触分组的处理。预期提供一个编译选项: + + - [x] 当开启时,当不同精度突触被分在同一组时,将按照最大精度处理;当分在不同组时,按各自精度处理 + - [ ] 当关闭时,不同精度突触将**不被分在同一组**(即对初次分配的CB进行分割,分割后得到的多个CB仍处于同一RG内) + - [ ] ANN模式前后端支持 + - 目前仅支持芯片的SNN模式。要想支持ANN模式,前端上需要支持ANN模式的算理。同时,ANN/SNN模式是物理核的行为,因此当网络拓扑中同时存在两种模式的神经元时,需要以此为约束条件,从而影响分组结果(类似第6点。例如,同为ANN模式下的不同神经元可被分在同一组) + - [ ] 神经元计算模型随机性仿真 + - 基于硬件神经元计算机制,实现带有部分随机数的计算机制。并考虑与硬件的实现相接近的程度(能否接近实现与硬件一致的随机数发生行为) -- [ ] 网络节点间自定义延迟( `tick_relative` )尝试ringbuffer结构优化 - - 目前使用的delay_register方案,将在每个神经元组后方申请较大内存,用于存放delay至后继节点的数据。可尝试使用ringbuffer的思路对此结构进行优化。 diff --git a/docs/Guide-of-PAIBox.md b/docs/Guide-of-PAIBox.md index 2e631565..cbfcde3c 100644 --- a/docs/Guide-of-PAIBox.md +++ b/docs/Guide-of-PAIBox.md @@ -17,8 +17,8 @@ poetry install ```toml 
python = "^3.8" pydantic = "^2.0" -numpy = "^1.23.0" -paicorelib = "0.0.12" +numpy = "^1.24.0" +paicorelib = "0.0.13" ``` 通过pip安装PAIBox: @@ -53,7 +53,7 @@ PAIBox提供**神经元**与**突触**作为基本组件,用于搭建神经网 PAIBox提供了多种类型的神经元模型,能够实现各种特殊的功能。 -⚠️ 请注意,神经元初始膜电位为0。 +⚠️ 神经元初始膜电位为0。 #### IF神经元 @@ -62,7 +62,7 @@ IF神经元实现了经典的“积分发射”模型,其调用方式及参数 ```python import paibox as pb -n1 = pb.neuron.IF(shape=10, threshold=127, reset_v=0, keep_shape=False, delay=1, tick_wait_start=1, tick_wait_end=0, name='n1') +n1 = pb.IF(shape=10, threshold=127, reset_v=0, keep_shape=False, delay=1, tick_wait_start=1, tick_wait_end=0, name='n1') ``` 其中: @@ -72,8 +72,9 @@ n1 = pb.neuron.IF(shape=10, threshold=127, reset_v=0, keep_shape=False, delay=1, - `reset_v`:神经元的重置膜电位。 - `keep_shape`:是否在仿真记录数据时保持尺寸信息,默认为 `False`。实际进行运算的尺寸仍视为一维。 - `delay`:设定该神经元组输出的延迟。默认为1,即本时间步的计算结果,**下一时间步**传递至后继神经元。 -- `tick_wait_start`: 设定该神经元组在第 `N` 个时间步时启动,0表示不启动。默认为1。 -- `tick_wait_end`: 设定该神经元组持续工作 `M` 个时间步,0表示**永远持续工作**。默认为0。 +- `tick_wait_start`:设定该神经元组在第 `N` 个时间步时启动,0表示不启动。默认为1。 +- `tick_wait_end`:设定该神经元组持续工作 `M` 个时间步,0表示**永远持续工作**。默认为0。 +- `unrolling_factor`:该参数与后端流程相关。展开因子表示神经元将被展开,部署至更多的物理核上,以降低延迟,并提高吞吐率。 - `name`:可选,为该对象命名。 #### LIF神经元 @@ -81,17 +82,17 @@ n1 = pb.neuron.IF(shape=10, threshold=127, reset_v=0, keep_shape=False, delay=1, LIF神经元实现了“泄露-积分-发射”神经元模型,其调用方式及参数如下: ```python -n1 = pb.neuron.LIF(shape=128, threshold=127, reset_v=0, leaky_v=-1, keep_shape=False, name='n1') +n1 = pb.LIF(shape=128, threshold=127, reset_v=0, leak_v=-1, keep_shape=False, name='n1') ``` -- `leaky_v`:LIF神经元的泄露值(有符号)。其他参数含义与IF神经元相同。 +- `leak_v`:LIF神经元的泄露值(有符号)。其他参数含义与IF神经元相同。 #### Tonic Spiking神经元 Tonic Spiking神经元可以实现对持续脉冲刺激的周期性反应。 ```python -n1 = pb.neuron.TonicSpiking(shape=128, fire_step=3, keep_shape=False, name='n1') +n1 = pb.TonicSpiking(shape=128, fire_step=3, keep_shape=False, name='n1') ``` - `fire_step`:发放周期,每接收到 `N` 次刺激后发放脉冲。 @@ -102,7 +103,7 @@ n1 = pb.neuron.TonicSpiking(shape=128, fire_step=3, keep_shape=False, name='n1') import paibox as pb import numpy as np -n1 = pb.neuron.TonicSpiking(shape=1, fire_step=3) +n1 = pb.TonicSpiking(shape=1, fire_step=3) inp_data = np.ones((10,), dtype=np.bool_) output = np.full((10,), 0, dtype=np.bool_) voltage = np.full((10,), 0, dtype=np.int32) @@ -132,7 +133,7 @@ print(output) Phasic Spiking神经元可以实现,在接受一定数量脉冲后发放,然后保持静息状态,不再发放。 ```python -n1 = pb.neuron.PhasicSpiking(shape=128, time_to_fire=3, neg_floor=10, keep_shape=False, name='n1') +n1 = pb.PhasicSpiking(shape=128, time_to_fire=3, neg_floor=10, keep_shape=False, name='n1') ``` - `time_to_fire`:发放时间。 @@ -144,7 +145,7 @@ n1 = pb.neuron.PhasicSpiking(shape=128, time_to_fire=3, neg_floor=10, keep_shape import paibox as pb import numpy as np -n1 = pb.neuron.PhasicSpiking(shape=1, time_to_fire=3) +n1 = pb.PhasicSpiking(shape=1, time_to_fire=3) # [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] inp_data = np.concatenate((np.zeros((2,), np.bool_), np.ones((10,), np.bool_))) output = np.full((12,), 0, dtype=np.bool_) @@ -175,10 +176,12 @@ print(output) ### 突触 +#### 全连接 + PAIBox中,突触用于连接不同神经元组,并包含了连接关系以及权重信息。以全连接类型的突触为实例: ```python -s1= pb.synapses.NoDecay(source=n1, dest=n2, weights=weight1, conn_type=pb.synapses.ConnType.All2All, name='s1') +s1= pb.FullConn(source=n1, dest=n2, weights=weight1, conn_type=pb.SynConnType.All2All, name='s1') ``` 其中: @@ -186,18 +189,19 @@ s1= pb.synapses.NoDecay(source=n1, dest=n2, weights=weight1, conn_type=pb.synaps - `source`:前向神经元组,可以是**神经元或者输入节点**类型。 - `dest`:后向神经元组,只能为**神经元**类型。 - `weights`:突触的权重。 -- `conn_type`:连接形式,默认为 `MatConn` 矩阵连接。当设置为 `All2All` 或 `One2One` 
时,`weights` 有更简洁的表达。 +- `conn_type`:连接形式,默认为 `MatConn` 矩阵连接。当设置为 `All2All`、`One2One` 或 `Identity` 时,`weights` 有更简洁的表达。 - `name`:可选,为该对象命名。 突触表达的是两个神经元组之间的连接关系。PAIBox提供了三种主要的连接关系表达: - `All2All`:全连接 - `One2One`:单对单连接 +- `Identity`:恒等映射,同单对单连接,权重为缩放因子标量 - `MatConn`:普通的矩阵连接 通常情况下,`MatConn` 适合所有的连接关系,而 `All2All`、`One2One` 则提供了对于特殊连接更为方便的表达。 -#### All2All 全连接 +##### All2All 全连接 对于全连接,其权重 `weights` 有两种输入类型: @@ -206,30 +210,30 @@ s1= pb.synapses.NoDecay(source=n1, dest=n2, weights=weight1, conn_type=pb.synaps 其中,`N1` 为前向神经元组数目,`N2` 为后向神经元组数目。 -#### One2One 单对单连接 +##### One2One 单对单连接 两组神经元之间依次单对单连接,这要求**前向与后向神经元数目相同**。其权重 `weights` 主要有以下几种输入类型: -- 标量:默认为1。这表示前层的各个神经元输出线性地输入到后层神经元。这种情况等同于 `ConnType.BYPASS` 旁路连接。 +- 标量:默认为1。这表示前层的各个神经元输出线性地输入到后层神经元,即 $\lambda\cdot\mathbf{I}$ ```python - n1 = pb.neuron.IF(shape=5,threshold=1) - n2 = pb.neuron.IF(shape=5,threshold=1) - s1 = pb.synapses.NoDecay(source=n1, dest=n2, conn_type=pb.ConnType.One2One, weights=2, name='s1') + n1 = pb.IF(shape=5, threshold=1) + n2 = pb.IF(shape=5, threshold=1) + s1 = pb.FullConn(source=n1, dest=n2, conn_type=pb.SynConnType.One2One, weights=2, name='s1') print(s1.weights) >>> 2 ``` - 其权重以标量的形式储存。由于在运算时标量会随着矩阵进行广播,因此计算正确且节省了存储开销。 + 其权重以标量的形式储存。 - 数组:尺寸要求为 `(N2,)`,可以自定义每组对应神经元之间的连接权重。如下例所示,设置 `weights` 为 `[1, 2, 3, 4, 5]`, ```python - n1 = pb.neuron.IF(shape=5,threshold=1) - n2 = pb.neuron.IF(shape=5,threshold=1) - s1 = pb.synapses.NoDecay(source=n1, dest=n2, conn_type=pb.ConnType.One2One, weights=np.arange(1, 6, dtype=np.int8), name='s1') + n1 = pb.IF(shape=5, threshold=1) + n2 = pb.IF(shape=5, threshold=1) + s1 = pb.FullConn(source=n1, dest=n2, conn_type=pb.SynConnType.One2One, weights=np.arange(1, 6, dtype=np.int8), name='s1') print(s1.weights) >>> @@ -242,15 +246,55 @@ s1= pb.synapses.NoDecay(source=n1, dest=n2, weights=weight1, conn_type=pb.synaps 其权重实际上为 `N*N` 矩阵,其中 `N` 为前向/后向神经元组数目。 -#### MatConn 一般连接 +##### Identity 恒等映射 + +具有缩放因子的单对单连接,即 `One2One` 中权重项为标量的特殊情况。 + +##### MatConn 一般连接 普通的神经元连接类型,仅可以通过矩阵设置其权重 `weights`。 +#### 1D卷积 + +全展开形式1D卷积为全连接突触的一种特殊表达。对于卷积形式的突触,需**严格指定**前后神经元的维度、卷积核权重、卷积核维度顺序与步长。 + +- `kernel`:卷积核权重。 +- `stride`:步长,标量。 +- `kernel_order`:指定卷积核维度顺序为 `OIL` 或 `IOL` 排列。 +- 神经元维度顺序仅支持 `CL`。 + +```python +n1 = pb.IF(shape=(8, 28), threshold=1) # Input feature map: (8, 28) +n2 = pb.IF(shape=(16, 26), threshold=1) # Output feature map: (16, 26) +kernel = np.random.randint(-128, 128, size=(16, 8, 3), dtype=np.int8) # OIl + +conv2d = pb.Conv1d(n1, n2, kernel=kernel, stride=1, kernel_order="OIL", name="conv1d_1") +``` + +#### 2D卷积 + +全展开形式2D卷积为全连接突触的一种特殊表达。对于卷积形式的突触,需**严格指定**前后神经元的维度、神经元维度顺序、卷积核权重、卷积核维度顺序与步长。 + +- `kernel`:卷积核权重。 +- `stride`:步长,可以为标量或元组。当为标量时,对应为 `(x, x)`;当为元组时,则对应为 `(x, y)`。 +- `kernel_order`:指定卷积核维度顺序为 `OIHW` 或 `IOHW` 排列。 +- 神经元维度顺序仅支持 `CHW`。 + +```python +n1 = pb.IF(shape=(8, 28, 28), threshold=1) # Input feature map: (8, 28, 28) +n2 = pb.IF(shape=(16, 26, 26), threshold=1) # Output feature map: (16, 26, 26) +kernel = np.random.randint(-128, 128, size=(16, 8, 3, 3), dtype=np.int8) # OIHW + +conv2d = pb.Conv2d(n1, n2, kernel=kernel, stride=1, kernel_order="OIHW", name="conv2d_1") +``` + +⚠️ `padding` 不支持,默认为0。 + ### 编码器 对于非脉冲数据,我们需要将其进行脉冲编码,然后输入网络中进行计算。 -PAIBox提供了有状态与无状态编码器。其中,有状态编码器是指编码过程与时间有关,将输入数据编码到一段时间窗口内。而无状态编码器是指编码过程与时间无关。每个时间步,都可以根据输入直接进行编码。 +PAIBox提供了有状态与无状态编码器。其中,有状态编码器是指编码过程与时间有关,将输入数据编码到一段时间窗口内。而无状态编码器是指编码过程与时间无关,每个时间步,都可以根据输入数据进行编码。 #### 无状态编码器 @@ -271,9 +315,11 @@ for t in range(20): #### 有状态编码器 -有状态编码器类别较多。但目前来看,使用传统思路进行训练的SNN网络不能使用与时间有关的有状态编码器进行训练。 
+有状态编码器类别较多。PAIBox提供了几种有状态编码器:周期编码器 `PeriodicEncoder`、延迟编码器 `LatencyEncoder` 。 -PAIBox提供了一种有状态编码器,周期性编码器 `PeriodicEncoder`。它以一段脉冲序列为输入,将其循环地在每一个时间步输出。以下为一个简单实例: +##### 周期编码器 + +它以一段脉冲序列为输入,将其循环地在每一个时间步输出。以下为一个简单实例: ```python # 定义一段脉冲序列 @@ -292,6 +338,24 @@ for t in range(20): 这将仿真20个时间步,周期性地获取输入的脉冲序列并将其输出。 +##### 延迟编码器 + +根据输入数据 `x` 延迟发放脉冲的编码器。当刺激强度越大,发放时间越早,且存在最大脉冲发放时间 `T`。因此对于每一个输入数据,都能得到一段时间步长为 `T` 的脉冲序列,每段序列有且仅有一个脉冲发放。编码类型可为:`linear` 或 `log`。以下为一个简单实例: + +```python +N = 6 +x = np.random.rand(N) +T = 20 + +le = pb.simulator.LatencyEncoder(T, "linear") + +out_spike = np.zeros((T, N), dtype=np.bool_) +for t in range(T): + out_spike[t] = le(x) +``` + +具体编码原理参见:[SpikingJelly/延迟编码器](https://spikingjelly.readthedocs.io/zh-cn/latest/activation_based/2_encoding.html#id5) + ### 输入节点 为了支持多样的数据输入形式,同时标明网络模型的输入节点,PAIBox设计了输入节点这一组件。 @@ -344,7 +408,7 @@ print(output) 当启用 `keep_shape` 时,特征图数据将保持其维度信息。 -输入节点的参数也可以是 `np.ndarray` 。以下为一个简单实例: +输入节点的参数也可以是数组。以下为一个简单实例: ```python x = np.random.randint(0, 5, size=(4, 4)) @@ -366,7 +430,7 @@ print(output) #### 函数类型输入 -输入节点支持使用自定义函数作为输入。以下为一个简单实例: +输入节点支持使用函数作为输入。以下为一个简单实例: ```python def fakeout(*args, **kwargs): @@ -388,7 +452,7 @@ print(output) [3 3 3 3]] ``` -当函数需要时间步信息,则可在函数参数中声明 `t` ,输入节点将在前端环境变量 `FRONTEND_ENV` 中获取时间步信息。当需要传入额外的参数时,通过 `FRONTEND_ENV.save()` 保存相关参数至前端环境变量。当函数与时间步或其他参数无关时,可使用 `**kwargs` 代替。以下为一个简单实例: +当函数需要时间步信息,则可在函数中声明传入参数 `t` ,输入节点则在前端环境变量 `FRONTEND_ENV` 中获取时间步信息。当需要传入额外的参数时,通过 `FRONTEND_ENV.save()` 保存相关参数至前端环境变量。当函数与时间步或其他参数无关时,可使用 `**kwargs` 代替。以下为一个简单实例: ```python from paibox import FRONTEND_ENV @@ -426,7 +490,7 @@ print(output) PAIBox提供了一些常用编码器,编码器内部实现了 `__call__` 方法,因此可作为输入节点的输入使用。在作为输入节点的输入使用时,它与一般函数做为输入节点的输入使用存在差别。 -在例化 `InputProj` 时,输入节点的输入为编码器。在运行时,还需要通过设置 `inp.input`,向输入节点输入待编码数据,输入节点内部将自动完成泊松编码并输出。以泊松编码器为例: +在例化 `InputProj` 时,输入节点的输入为编码器。在运行时,还需要通过设置 `inp.input`,**向输入节点输入待编码数据**,节点内部将完成编码并输出。以泊松编码器为例: ```python pe = pb.simulator.PoissonEncoder() # 例化泊松编码器 @@ -452,7 +516,7 @@ print(output) ### 网络模型 -在PAIBox中,神经网络搭建可以通过继承 `DynSysGroup`(或 `Network`,`DynSysGroup` 别名)来实现,并在其中例化神经元与突触组件,完成网络模型的搭建。以一个简单的全连接网络为例: +在PAIBox中,可以通过继承 `DynSysGroup`(或 `Network`)来实现,并在其中例化神经元与突触组件,完成网络模型的构建。以一个简单的两层全连接网络为例:

基础网络搭建-全连接网络示例 @@ -471,10 +535,10 @@ class fcnet(pb.Network): pe = pb.simulator.PoissonEncoder() self.i1 = pb.InputProj(input=pe, shape_out=(784,)) - self.n1 = pb.neuron.IF(128, threshold=128, reset_v=0, tick_wait_start=1) - self.n2 = pb.neuron.IF(10, threshold=128, reset_v=0, tick_wait_start=2) - self.s1 = pb.synapses.NoDecay(self.i1, self.n1, weights=weight1, conn_type=pb.synapses.ConnType.All2All) - self.s2 = pb.synapses.NoDecay(self.n1, self.n2, weights=weight2, conn_type=pb.synapses.ConnType.All2All) + self.n1 = pb.IF(128, threshold=128, reset_v=0, tick_wait_start=1) + self.n2 = pb.IF(10, threshold=128, reset_v=0, tick_wait_start=2) + self.fc1 = pb.FullConn(self.i1, self.n1, weights=weight1, conn_type=pb.SynConnType.All2All) + self.fc2 = pb.FullConn(self.n1, self.n2, weights=weight2, conn_type=pb.SynConnType.All2All) ``` #### 容器类型 @@ -486,44 +550,46 @@ import paibox as pb l1 = pb.NodeList() for i in range(5): - l1.append(pb.neuron.IF(10, threshold=5, reset_v=0)) + l1.append(pb.IF(10, threshold=5, reset_v=0)) for i in range(5): - l1.append(pb.neuron.LIF(10, threshold=5, reset_v=0)) + l1.append(pb.LIF(10, threshold=5, reset_v=0)) ``` 如此,我们共例化了10个神经元,包括5个IF神经元、5个LIF神经元。在容器内的基本组件可通过下标进行访问、与其他基本组件连接。这与一般容器类型的用法相同。 -#### 构建子网络 +#### 嵌套网络 有时网络中会重复出现类似的结构,这时先构建子网络,再多次例化复用是个不错的选择。 ```python +froom typing import Optional import paibox as pb class ReusedStructure(pb.Network): - def __init__(self, weight): - super().__init__() - self.pre_n = pb.LIF((10,), 10) - self.post_n = pb.LIF((10,), 10) - self.syn = pb.NoDecay( - self.pre_n, self.post_n, conn_type=pb.synapses.ConnType.All2All, weights=weight + def __init__(self, weight, tws, name: Optional[str] = None): + super().__init__(name=name) + + self.pre_n = pb.LIF((10,), 10, tick_wait_start=tws) + self.post_n = pb.LIF((10,), 10, tick_wait_start=tws+1) + self.fc = pb.FullConn( + self.pre_n, self.post_n, conn_type=pb.SynConnType.All2All, weights=weight ) class Net(pb.Network): def __init__(self, w1, w2): self.inp1 = pb.InputProj(1, shape_out=(10,)) - subnet1 = ReusedStructure(w1) - subnet2 = ReusedStructure(w2) - self.s1 = pb.NoDecay( + subnet1 = ReusedStructure(w1, tws=1, name="Reused_Struct_0") + subnet2 = ReusedStructure(w2, tws=3, name="Reused_Struct_1") + self.fc1 = pb.FullConn( self.inp1, subnet1.pre_n, - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) - self.s2 = pb.NoDecay( + self.fc2 = pb.FullConn( subnet1.post_n, subnet2.pre_n, - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) super().__init__(subnet1, subnet2) # Necessary! @@ -533,7 +599,7 @@ w2 = ... 
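# w1 and w2 are placeholder weight arrays. With the (10,)-shaped LIF layers in
# ReusedStructure, an All2All FullConn accepts either a scalar or a (10, 10) array,
# e.g. np.random.randint(-8, 8, size=(10, 10), dtype=np.int8).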
net = Net(w1, w2) ``` -上述示例代码中,我们先创建需复用的子网络 `ReusedStructure`,其结构为 `pre_n` -> `syn` -> `post_n`。而后,在父网络 `Net` 中实例化两个子网络 `subnet1`、 `subnet2`,并与父网络其他部分连接,此时网络结构为:`inp1` -> `s1` -> `subnet1` -> `s2` -> `subnet2`。最后,在为 `pb.Network` 初始化时,传入子网络 `subnet1`、 `subnet2`。由此,父网络 `Net` 才能发现子网络组件。 +上述示例代码中,我们先创建需复用的子网络 `ReusedStructure`,其结构为 `pre_n` -> `fc` -> `post_n`。而后,在父网络 `Net` 中实例化两个子网络 `subnet1`、 `subnet2`,并与父网络其他部分连接,此时网络结构为:`inp1` -> `fc1` -> `subnet1` -> `fc22` -> `subnet2`。最后,在为 `pb.Network` 初始化时,传入子网络 `subnet1`、 `subnet2`。由此,父网络 `Net` 才能发现子网络组件。如果想取到 `Net` 内的 `subnet1` 对象,可通过索引其名字 `Net["Reused_Struct_0"]` 取到。 上述示例为一个二级嵌套网络,对于三级嵌套网络或更高(不推荐使用),可参考上述方式构建。 @@ -589,7 +655,7 @@ sim.add_probe(probe2) 可监测的对象包括网络内部所有的属性。例如,神经元及突触的各类属性,常用的监测对象包括: - 输入节点的 `feature_map`。 -- 神经元:脉冲输出 `spike` (本层神经元产生的脉冲,但不一定传递至后继神经元)、基于硬件寄存器的**输出** `output`(大小为 `256*N` )、特征图形式的脉冲输出 `feature_map `、膜电位 `voltage`。 +- 神经元:脉冲输出 `spike` 、基于硬件寄存器的**输出** `output`(大小为 `256*N` )、特征图形式的脉冲输出 `feature_map `、膜电位 `voltage`。 - 突触:输出 `output`。 ### 仿真机理 @@ -631,7 +697,7 @@ sim.reset() mapper = pb.Mapper() mapper.build(fcnet) graph_info = mapper.compile(weight_bit_optimization=True, grouping_optim_target="both") -mapper.export(write_to_file=True, fp="./debug/", format="npy", split_by_coordinate=False, local_chip_addr=(0, 0), export_core_params=False) +mapper.export(write_to_file=True, fp="./debug/", format="bin", split_by_coordinate=False, local_chip_addr=(0, 0), export_core_params=False) # Clear all the results mapper.clear() @@ -639,7 +705,7 @@ mapper.clear() 其中,编译时有如下参数可指定: -- `weight_bit_optimization`: 是否对权重精度进行优化处理。例如,将声明时为 INT8 的权重根据实际值当作更小的精度处理(当权重的值均在 [-8, 7] 之间,则可当作 INT4 进行处理)。默认由后端配置项内对应**编译选项**指定(默认开启)。 +- `weight_bit_optimization`: 是否对权重精度进行优化处理。这将使得声明时为 INT8 的权重根据实际值当作更小的精度处理(当权重的值均在 [-8, 7] 之间,则可当作 INT4 进行处理)。默认由后端配置项内对应**编译选项**指定(默认开启)。 - `grouping_optim_target`:指定神经元分组的优化目标,可以为 `"latency"`,`"core"` 或 `"both"`,分别代表以延时/吞吐率、占用核资源为优化目标、或二者兼顾。默认由后端配置项内对应**编译选项**指定(默认为 `both`)。 - 同时,该方法将返回字典形式的编译后网络的信息。 @@ -648,20 +714,28 @@ mapper.clear() - `write_to_file`: 是否将配置帧导出为文件。默认为 `True`。 - `fp`:导出目录。若未指定,则默认为后端配置选项 `build_directory` 所设置的目录(当前工作目录)。 - `format`:导出交换文件格式,可以为 `bin`、`npy` 或 `txt`。默认为 `bin`。 -- `split_by_coordinate`:是否将配置帧以每个核坐标进行分割,由此生成的配置帧文件命名为"config_core1"、"config_core2"等。默认为 `False`。 -- `local_chip_addr`:本地芯片地址,元组格式表示。默认为后端配置项 `local_chip_addr` 所设置的默认值。 +- `split_by_coordinate`:是否将配置帧以每个核坐标进行分割,由此生成的配置帧文件命名为"config_core1"、"config_core2"等。默认为 `False`,即最终导出为一个文件。 +- `local_chip_addr`:本地芯片坐标,元组格式表示。默认为后端配置项 `local_chip_addr` 所设置的默认值。 - `export_core_params`: 是否导出实际使用核参数至json文件,以直观显示实际使用核的配置信息。默认为 `False`。 -- 同时,该方法将返回模型的配置项字典。 + +同时,该方法将返回模型的配置项字典 `GraphInfo`,包括: + +- `input`:输入节点信息字典。 +- `output`:输出目的地信息字典。 +- `memebers`:中间层所在物理核的配置项字典。 +- `inherent_timestep`:网络模型的最长时间步。 +- `n_core_required`:网络模型需要的物理核数目。 +- `extras`:其他额外的网络信息字典,例如,编译后的网络名称。 ### 后端配置项 与后端相关的配置项由 `BACKEND_CONFIG` 统一保存与访问,例如上述**编译选项**、`build_directory`、`local_chip_addr` 等。常用的配置项有如下: ```python -BACKEND_CONFIG.local_chip_addr +BACKEND_CONFIG.local_chip_addr # 本地芯片坐标 >>> Coord(0, 0) -BACKEND_CONFIG.test_chip_addr +BACKEND_CONFIG.test_chip_addr # 测试芯片坐标(一般是FPGA) >>> Coord(1, 0) # Set output directory diff --git a/docs/images/.gitignore b/docs/images/.gitignore deleted file mode 100644 index 8e062ad3..00000000 --- a/docs/images/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.drawio diff --git a/examples/mnist/example1/data/mnist_input_data7.npy b/examples/mnist/data/mnist_input_data7.npy similarity index 100% rename from 
examples/mnist/example1/data/mnist_input_data7.npy rename to examples/mnist/data/mnist_input_data7.npy diff --git a/examples/mnist/example1/README.md b/examples/mnist/example1/README.md index 80ad3aac..2a19f296 100644 --- a/examples/mnist/example1/README.md +++ b/examples/mnist/example1/README.md @@ -1,7 +1,7 @@ -该示例构建了双输入节点的两层全连接网络,进行仿真测试与后端部署,可实现MNIST手写字体识别。 +该示例构建了双输入节点的两层全连接网络。 - 权重及阈值:`./weights` -- 测试数据:`./data`,数字7 +- 测试数据:`../data`,数字7 - 网络结构:

MNIST-ex1 网络结构 diff --git a/examples/mnist/example1/examples-mnist-ex1-network.png b/examples/mnist/example1/examples-mnist-ex1-network.png index 46009cb5..4a164107 100644 Binary files a/examples/mnist/example1/examples-mnist-ex1-network.png and b/examples/mnist/example1/examples-mnist-ex1-network.png differ diff --git a/examples/mnist/example1/main.py b/examples/mnist/example1/main.py index 734b6030..d5d713e8 100644 --- a/examples/mnist/example1/main.py +++ b/examples/mnist/example1/main.py @@ -2,13 +2,59 @@ from pathlib import Path import numpy as np -from model import fcnet_2layer_dual_port import paibox as pb +class fcnet_2layer_dual_port(pb.Network): + def __init__(self, weight1, Vthr1, weight2, Vthr2): + super().__init__() + + pe = pb.simulator.PoissonEncoder() + self.i1 = pb.InputProj(input=pe, shape_out=(392,)) + self.i2 = pb.InputProj(input=pe, shape_out=(392,)) + self.n1 = pb.IF(128, threshold=Vthr1, reset_v=0) + self.s1 = pb.FullConn( + self.i1, + self.n1, + weights=weight1[:392], + conn_type=pb.SynConnType.All2All, + ) + self.s2 = pb.FullConn( + self.i2, + self.n1, + weights=weight1[392:], + conn_type=pb.SynConnType.All2All, + ) + + # tick_wait_start = 2 for second layer + self.n2 = pb.IF( + 5, threshold=Vthr2, reset_v=0, tick_wait_start=2, name="batch_dual_port_o1" + ) + self.n3 = pb.IF( + 5, threshold=Vthr2, reset_v=0, tick_wait_start=2, name="batch_dual_port_o2" + ) + self.s3 = pb.FullConn( + self.n1, + self.n2, + weights=weight2[:, :5], + conn_type=pb.SynConnType.All2All, + ) + self.s4 = pb.FullConn( + self.n1, + self.n3, + weights=weight2[:, 5:], + conn_type=pb.SynConnType.All2All, + ) + + self.probe1 = pb.Probe(target=self.n2, attr="spike") + self.probe2 = pb.Probe(target=self.n3, attr="spike") + + +param_dict = {} + + def getNetParam(): - param_dict = {} timestep = 4 layer_num = 2 delay = layer_num - 1 @@ -27,8 +73,6 @@ def getNetParam(): param_dict["w2"] = w2 param_dict["vthr2"] = vthr2 - return param_dict - if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -37,7 +81,7 @@ def getNetParam(): ) args = parser.parse_args() - param_dict = getNetParam() + getNetParam() pb_net = fcnet_2layer_dual_port( param_dict["w1"], param_dict["vthr1"], @@ -46,8 +90,8 @@ def getNetParam(): ) # Network simulation - raw_data = np.load("./data/mnist_input_data7.npy") - input_data = raw_data.flatten() + raw_data = np.load(Path(__file__).parent.parent / "data" / "mnist_input_data7.npy") + input_data = raw_data.ravel() # Visualize if args.verbose: @@ -79,6 +123,10 @@ def getNetParam(): graph_info = mapper.compile( weight_bit_optimization=True, grouping_optim_target="both" ) + + # #N of cores required + print("Core required:", graph_info["n_core_required"]) + mapper.export( write_to_file=True, fp=out_dir / "debug", diff --git a/examples/mnist/example1/model.py b/examples/mnist/example1/model.py deleted file mode 100644 index 566ec65c..00000000 --- a/examples/mnist/example1/model.py +++ /dev/null @@ -1,46 +0,0 @@ -import paibox as pb - - -class fcnet_2layer_dual_port(pb.Network): - def __init__(self, weight1, Vthr1, weight2, Vthr2): - super().__init__() - - pe = pb.simulator.PoissonEncoder() - self.i1 = pb.InputProj(input=pe, shape_out=(392,)) - self.i2 = pb.InputProj(input=pe, shape_out=(392,)) - self.n1 = pb.neuron.IF(128, threshold=Vthr1, reset_v=0) - self.s1 = pb.synapses.NoDecay( - self.i1, - self.n1, - weights=weight1[:392], - conn_type=pb.synapses.ConnType.All2All, - ) - self.s2 = pb.synapses.NoDecay( - self.i2, - self.n1, - weights=weight1[392:], - 
conn_type=pb.synapses.ConnType.All2All, - ) - - # tick_wait_start = 2 for second layer - self.n2 = pb.neuron.IF( - 5, threshold=Vthr2, reset_v=0, tick_wait_start=2, name="batch_dual_port_o1" - ) - self.n3 = pb.neuron.IF( - 5, threshold=Vthr2, reset_v=0, tick_wait_start=2, name="batch_dual_port_o2" - ) - self.s3 = pb.synapses.NoDecay( - self.n1, - self.n2, - weights=weight2[:, :5], - conn_type=pb.synapses.ConnType.All2All, - ) - self.s4 = pb.synapses.NoDecay( - self.n1, - self.n3, - weights=weight2[:, 5:], - conn_type=pb.synapses.ConnType.All2All, - ) - - self.probe1 = pb.simulator.Probe(target=self.n2, attr="spike") - self.probe2 = pb.simulator.Probe(target=self.n3, attr="spike") diff --git a/examples/mnist/example2/README.md b/examples/mnist/example2/README.md new file mode 100644 index 00000000..f02cb7a0 --- /dev/null +++ b/examples/mnist/example2/README.md @@ -0,0 +1,8 @@ +该示例构建了常规的卷积网络,由两层卷积与一层全连接组成。 + +- 权重及阈值:`./weights` +- 测试数据:`../data`,数字7 +- 网络结构: +

+ MNIST-ex2 网络结构 +

diff --git a/examples/mnist/example2/examples-mnist-ex2-network.png b/examples/mnist/example2/examples-mnist-ex2-network.png new file mode 100644 index 00000000..59b64323 Binary files /dev/null and b/examples/mnist/example2/examples-mnist-ex2-network.png differ diff --git a/examples/mnist/example2/main.py b/examples/mnist/example2/main.py new file mode 100644 index 00000000..0b87a0bb --- /dev/null +++ b/examples/mnist/example2/main.py @@ -0,0 +1,119 @@ +import argparse +from pathlib import Path + +import numpy as np + +import paibox as pb + + +class Conv2d_Net(pb.Network): + def __init__(self, weight1, Vthr1, weight2, Vthr2, weight3, Vthr3): + super().__init__() + + pe = pb.simulator.PoissonEncoder() + self.i1 = pb.InputProj(input=pe, shape_out=(1, 28, 28)) + self.n1 = pb.IF((2, 26, 26), threshold=Vthr1, reset_v=0) + self.conv2d_1 = pb.Conv2d(self.i1, self.n1, kernel=weight1, stride=1) + + self.n2 = pb.IF((4, 24, 24), threshold=Vthr2, reset_v=0, tick_wait_start=2) + self.conv2d_2 = pb.Conv2d(self.n1, self.n2, kernel=weight2, stride=1) + + self.n3 = pb.IF(10, threshold=Vthr3, reset_v=0, tick_wait_start=3) + self.fc1 = pb.FullConn( + self.n2, self.n3, weights=weight3, conn_type=pb.SynConnType.All2All + ) + + self.probe1 = pb.Probe(self.n3, "spike") + + +param_dict = {} + + +def getNetParam(): + timestep = 8 + layer_num = 3 + delay = layer_num - 1 + + weights_dir = Path("./weights") + w1 = np.load(weights_dir / "weight_conv1.npy").astype(np.int8) + vthr1 = int(np.load(weights_dir / "Vthr_conv1.npy") / 1.0) + w2 = np.load(weights_dir / "weight_conv2.npy").astype(np.int8) + vthr2 = int(np.load(weights_dir / "Vthr_conv2.npy") / 1.0) + w3 = np.load(weights_dir / "weight_fc1.npy").astype(np.int8).T + vthr3 = int(np.load(weights_dir / "Vthr_fc1.npy") / 1.0) + + param_dict["timestep"] = timestep + param_dict["layer_num"] = layer_num + param_dict["delay"] = delay + param_dict["w1"] = w1 + param_dict["vthr1"] = vthr1 + param_dict["w2"] = w2 + param_dict["vthr2"] = vthr2 + param_dict["w3"] = w3 + param_dict["vthr3"] = vthr3 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-v", "--verbose", help="visualize the input data", action="store_true" + ) + args = parser.parse_args() + + getNetParam() + pb_net = Conv2d_Net( + param_dict["w1"], + param_dict["vthr1"], + param_dict["w2"], + param_dict["vthr2"], + param_dict["w3"], + param_dict["vthr3"], + ) + + # Network simulation + raw_data = np.load(Path(__file__).parent.parent / "data" / "mnist_input_data7.npy") + input_data = raw_data.ravel() + + # Visualize + if args.verbose: + pe = pb.simulator.PoissonEncoder() + data_to_see = pe(raw_data).astype(np.int8) + print(data_to_see) + + # Input + pb_net.i1.input = input_data + + # Simulation, duration=timestep + delay + sim = pb.Simulator(pb_net) + sim.run(param_dict["timestep"] + param_dict["delay"], reset=False) + + # Decode the output + spike_out = sim.data[pb_net.probe1].astype(np.int8) + spike_out = spike_out[param_dict["delay"] :] + spike_sum = spike_out.sum(axis=0) + pred = np.argmax(spike_sum) + + assert pred == 7, print("failed") # Correct result is 7 + + out_dir = Path(__file__).parent + + mapper = pb.Mapper() + mapper.build(pb_net) + graph_info = mapper.compile( + weight_bit_optimization=True, grouping_optim_target="both" + ) + + # #N of cores required + print("Core required:", graph_info["n_core_required"]) + + mapper.export( + write_to_file=True, + fp=out_dir / "debug", + format="npy", + split_by_coordinate=True, + local_chip_addr=(0, 0), + 
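        # format may be "bin", "npy" or "txt"; with split_by_coordinate=True the
        # configuration frames are written as one file per core coordinate
        # (config_core<addr>) under fp, instead of a single merged file.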
export_core_params=False, + ) + + # Clear all the results + mapper.clear() diff --git a/examples/mnist/example2/weights/Vthr_conv1.npy b/examples/mnist/example2/weights/Vthr_conv1.npy new file mode 100644 index 00000000..2a18baee Binary files /dev/null and b/examples/mnist/example2/weights/Vthr_conv1.npy differ diff --git a/examples/mnist/example2/weights/Vthr_conv2.npy b/examples/mnist/example2/weights/Vthr_conv2.npy new file mode 100644 index 00000000..6172d3e5 Binary files /dev/null and b/examples/mnist/example2/weights/Vthr_conv2.npy differ diff --git a/examples/mnist/example2/weights/Vthr_fc1.npy b/examples/mnist/example2/weights/Vthr_fc1.npy new file mode 100644 index 00000000..3b1ce3f2 Binary files /dev/null and b/examples/mnist/example2/weights/Vthr_fc1.npy differ diff --git a/examples/mnist/example2/weights/weight_conv1.npy b/examples/mnist/example2/weights/weight_conv1.npy new file mode 100644 index 00000000..5086aa21 Binary files /dev/null and b/examples/mnist/example2/weights/weight_conv1.npy differ diff --git a/examples/mnist/example2/weights/weight_conv2.npy b/examples/mnist/example2/weights/weight_conv2.npy new file mode 100644 index 00000000..d9c9743c Binary files /dev/null and b/examples/mnist/example2/weights/weight_conv2.npy differ diff --git a/examples/mnist/example2/weights/weight_fc1.npy b/examples/mnist/example2/weights/weight_fc1.npy new file mode 100644 index 00000000..b5a8e71e Binary files /dev/null and b/examples/mnist/example2/weights/weight_fc1.npy differ diff --git a/paibox/__init__.py b/paibox/__init__.py index 863ef168..08c35d8f 100644 --- a/paibox/__init__.py +++ b/paibox/__init__.py @@ -1,3 +1,5 @@ +from importlib.metadata import version + from .backend import BACKEND_CONFIG as BACKEND_CONFIG from .backend import Mapper as Mapper from .base import * @@ -10,24 +12,38 @@ from .projection import InputProj as InputProj from .simulator import Probe as Probe from .simulator import Simulator as Simulator +from .synapses import Conv1d as Conv1d +from .synapses import Conv2d as Conv2d +from .synapses import FullConn as FullConn +from .synapses import GeneralConnType as SynConnType from .synapses import NoDecay as NoDecay -__all__ = [ - "Mapper", - "DynSysGroup", - "Network", - "NodeDict", - "NodeList", - "InputProj", - "Simulator", - "Probe", - "BACKEND_CONFIG", - "FRONTEND_ENV", -] - -from importlib.metadata import version - try: __version__ = version("paibox") except Exception: __version__ = None + +from paibox import tools + +# Minimum required version of paicorelib +__plib_minimum_version__ = "0.0.13" + +try: + import paicorelib as plib + + if hasattr(plib, "get_version"): # For plib <= 0.0.12 + raise ImportError( + tools.PLIB_UPDATE_INTRO.format( + __plib_minimum_version__, ".".join(map(str, plib.get_version())) # type: ignore + ) + ) from None + + if plib.__version__ < __plib_minimum_version__: # For plib > 0.0.12 + raise ImportError( + tools.PLIB_UPDATE_INTRO.format(__plib_minimum_version__, plib.__version__) + ) from None + + del tools, plib + +except ModuleNotFoundError: + raise ModuleNotFoundError(tools.PLIB_INSTALL_INTRO) from None diff --git a/paibox/backend/conf_template.py b/paibox/backend/conf_template.py index a5e6490b..0000ebca 100644 --- a/paibox/backend/conf_template.py +++ b/paibox/backend/conf_template.py @@ -2,7 +2,7 @@ from dataclasses import dataclass from enum import Enum from pathlib import Path -from typing import Any, Dict, List, Literal, NamedTuple, TypedDict +from typing import Any, ClassVar, Dict, List, Literal, NamedTuple, TypedDict 
import numpy as np from numpy.typing import NDArray @@ -10,6 +10,7 @@ LCN_EX, AxonCoord, Coord, + HwConfig, InputWidthFormat, MaxPoolingEnable, NeuronAttrs, @@ -23,8 +24,8 @@ WeightPrecision, get_replication_id, ) +from paicorelib.framelib import types from paicorelib.framelib.frame_gen import OfflineFrameGen -from paicorelib.framelib.types import FRAME_DTYPE, FrameArrayType from paicorelib.framelib.utils import np2bin, np2npy, np2txt from typing_extensions import NotRequired, TypeAlias @@ -33,6 +34,17 @@ from .context import _BACKEND_CONTEXT from .graphs_types import NodeName +# Prevent import errors caused by changes in type definitions in paicorelib. +if hasattr(types, "FRAME_DTYPE"): + FRAME_DTYPE = types.FRAME_DTYPE +else: + FRAME_DTYPE = np.uint64 + +if hasattr(types, "FrameArrayType"): + FrameArrayType = types.FrameArrayType +else: + FrameArrayType = NDArray[FRAME_DTYPE] + class CoreConfig(NamedTuple): """Configurations of core.""" @@ -154,8 +166,8 @@ def encapsulate( - addr_offset: offset of the RAM address. - axon_segs: the destination axon segments. - dest_core_coords: coordinates of the core of the destination axons. - - dest_chip_coord: coordinate of the chip of the destination axons. \ - The default is `output_chip_addr` in the backend context. + - dest_chip_coord: coordinate of the chip of the destination axons. Default is \ + `output_chip_addr` in the backend context. """ attrs = NeuronAttrs.model_validate(neuron.export_params(), strict=True) dest_rid = get_replication_id(dest_core_coords) @@ -243,6 +255,22 @@ def __json__(self) -> Dict[str, Any]: return dict_ +class EmptyCorePlacementConfig(CorePlacementConfig): + _default_seed: ClassVar[int] = 0 + _default_zero_wram: ClassVar[NDArray[np.uint64]] = np.zeros( + (HwConfig.ADDR_RAM_MAX, 18), dtype=np.uint64 + ) + + @classmethod + def encapsulate(cls, core_config: CoreConfig): + return cls( + cls._default_seed, + cls._default_zero_wram, + ParamsReg.model_validate(core_config._asdict(), strict=True), + {}, + ) + + InputNodeInfo: TypeAlias = Dict[NodeName, NeuronDest] OutputDestInfo: TypeAlias = Dict[NodeName, Dict[int, NeuronDestInfo]] CorePlacementInfo: TypeAlias = Dict[Coord, CorePlacementConfig] @@ -260,8 +288,8 @@ class GraphInfo(TypedDict): inherent_timestep: int n_core_required: int """The actual used cores.""" - # n_core_occupied: int - # """The occupied cores, including used & wasted.""" + n_core_occupied: int + """The occupied cores, including used & wasted.""" extras: NotRequired[Dict[str, Any]] @@ -271,7 +299,7 @@ def gen_config_frames_by_coreconf( write_to_file: bool, fp: Path, split_by_coord: bool, - format: Literal["txt", "bin", "npy"] = "bin", + format: Literal["txt", "bin", "npy"], ) -> Dict[Coord, FrameArrayType]: """Generate configuration frames by given the `CorePlacementConfig`. @@ -281,10 +309,10 @@ def gen_config_frames_by_coreconf( - write_to_file: whether to write frames to file. - fp: If `write_to_file` is `True`, specify the path. - split_by_coord: whether to split the generated frames file by the core coordinates. - - format: it can be `txt`, `bin`, or `npy`. `bin` & `npy` are recommended. + - format: `txt`, `bin`, or `npy`. """ - def _write_to_f(name: str, array: np.ndarray) -> None: + def _write_to_f(name: str, array: FrameArrayType) -> None: nonlocal fp, format _fp = fp / (name + f".{format}") @@ -316,7 +344,7 @@ def _write_to_f(name: str, array: np.ndarray) -> None: v.params_reg, ) - # 3. Iterate all the neuron segments in the function inside + # 3. 
Iterate all the neuron segments inside the physical core. config_frame_type3 = [] for neu_conf in v.neuron_configs.values(): config_frame_type3.append( @@ -333,20 +361,26 @@ def _write_to_f(name: str, array: np.ndarray) -> None: ) ) - frame3 = np.concatenate( - [f.value for f in config_frame_type3], dtype=FRAME_DTYPE - ) + if config_frame_type3: + frame3 = np.concatenate( + [f.value for f in config_frame_type3], dtype=FRAME_DTYPE, casting="no" + ) + else: + frame3 = np.asarray([], dtype=FRAME_DTYPE) # 4. Only one config frame type IV for each physical core. n_addr_write = v.params_reg.num_dendrite # The number of address to write - config_frame_type4 = OfflineFrameGen.gen_config_frame4( - target_chip_coord, - core_coord, - _default_rid, - 0, - 18 * n_addr_write, - v.weight_ram[:n_addr_write], - ) + if n_addr_write > 0: + config_frame_type4 = OfflineFrameGen.gen_config_frame4( + target_chip_coord, + core_coord, + _default_rid, + 0, + 18 * n_addr_write, + v.weight_ram[:n_addr_write], + ) + else: + config_frame_type4 = None _debug_dict[core_coord] = { "config1": config_frame_type1, @@ -355,15 +389,23 @@ def _write_to_f(name: str, array: np.ndarray) -> None: "config4": config_frame_type4, } - frame_arrays_on_core[core_coord] = np.concatenate( - [ - config_frame_type1.value, - config_frame_type2.value, - frame3, - config_frame_type4.value, - ], - dtype=FRAME_DTYPE, - ) + if config_frame_type4: + frame_arrays_on_core[core_coord] = np.concatenate( + [ + config_frame_type1.value, + config_frame_type2.value, + frame3, + config_frame_type4.value, + ], + dtype=FRAME_DTYPE, + casting="no", + ) + else: + frame_arrays_on_core[core_coord] = np.concatenate( + [config_frame_type1.value, config_frame_type2.value, frame3], + dtype=FRAME_DTYPE, + casting="no", + ) if write_to_file: if split_by_coord: @@ -371,7 +413,9 @@ def _write_to_f(name: str, array: np.ndarray) -> None: addr = core_coord.address _write_to_f(f"config_core{addr}", f) else: - _f = np.concatenate(list(frame_arrays_on_core.values()), dtype=FRAME_DTYPE) + _f = np.concatenate( + list(frame_arrays_on_core.values()), dtype=FRAME_DTYPE, casting="no" + ) _write_to_f(f"config_cores_all", _f) return frame_arrays_on_core diff --git a/paibox/backend/experimental/coreblock.py b/paibox/backend/experimental/coreblock.py deleted file mode 100644 index 1687880c..00000000 --- a/paibox/backend/experimental/coreblock.py +++ /dev/null @@ -1,96 +0,0 @@ -from typing import List, NamedTuple, NewType, Sequence - -from paibox.base import NeuDyn - - -class NeuronSegment(NamedTuple): - parent: NeuDyn - """指示这个对象是描述的哪个神经元组""" - index: slice - """指示这段神经元对应的下标范围(一定连续分配)""" - addr_ram: slice - """分配到在物理核内的RAM坐标范围(一定连续分配)""" - - -NeuronSegments = NewType("NeuronSegments", List[NeuronSegment]) # one_core - - -def get_neuron_segments_1( - neurons: Sequence[NeuDyn], capacity: int -) -> List[NeuronSegments]: - result = [] - - for n in neurons: - num = n.num_out - i = 0 - - while i < (num - 1) // capacity: - segment = NeuronSegment( - n, slice(i * capacity, capacity * (i + 1), 1), slice(0, capacity, 1) - ) - - result.append([segment]) - i += 1 - - segment = NeuronSegment( - n, slice(i * capacity, num, 1), slice(0, num - (i * capacity), 1) - ) - result.append([segment]) - - return result - - -def get_neuron_segments_2( - neurons: Sequence[NeuDyn], capacity: int -) -> List[NeuronSegments]: - result = [] - segments_of_neurons = get_neuron_segments_1(neurons, capacity) - temp = [] - - sum = 0 - for segs in segments_of_neurons: - if segs[0].addr_ram.stop < capacity: - 
temp.append(segs[0]) - sum += segs[0].addr_ram.stop - else: - result.append(segs) - - temp.sort(key=lambda seg: seg.addr_ram.stop) - - i = 0 # 剩余部分可组成的物理核个数 - j = 0 # 有剩余的的物理核 - while i < (sum - 1) // capacity + 1: - segments = NeuronSegments([]) - full = 0 - empty = capacity - full - - while empty > 0 and j < len(temp): - if empty >= temp[j].addr_ram.stop: - segment = NeuronSegment( - temp[j].parent, - temp[j].index, - slice(full, full + temp[j].addr_ram.stop, 1), - ) - segments.append(segment) - full += temp[j].addr_ram.stop - empty = capacity - full - j += 1 - else: - segment = NeuronSegment( - temp[j].parent, - slice(temp[j].index.start, temp[j].index.start + empty, 1), - slice(full, capacity, 1), - ) - segments.append(segment) - temp[j] = NeuronSegment( - temp[j].parent, - slice(temp[j].index.start + empty, temp[j].index.stop, 1), - slice(0, temp[j].addr_ram.stop - empty, 1), - ) - full += capacity - empty = 0 - - i += 1 - result.append(segments) - - return result diff --git a/paibox/backend/experimental/routing.py b/paibox/backend/experimental/routing.py deleted file mode 100644 index 8d40e70c..00000000 --- a/paibox/backend/experimental/routing.py +++ /dev/null @@ -1,469 +0,0 @@ -from typing import Any, List, Optional, Sequence, final - -from paicorelib.v2.routing_defs import RoutingDirection as Direction -from paicorelib.v2.routing_defs import RoutingDirectionIdx as DirectionIdx -from paicorelib.v2.routing_defs import RoutingNodeLevel as Level -from paicorelib.v2.routing_defs import RoutingNodeStatus as NodeStatus -from paicorelib.v2.routing_defs import get_node_consumption - -from ...exceptions import NotSupportedError -from ..placement import CorePlacement - -""" - This is an alternative to the routing tree that \ - does the same thing as the development version \ - but is more complex. - - Some functions are still not implemented and \ - will not be developed until the solution is \ - reconsidered later. -""" - - -class RoutingNode: - def __init__( - self, - level: Level, - data: Optional[Any] = None, - *, - tag: Optional[str] = None, - ) -> None: - """Instance a tree node with `level`. \ - For a node with level Lx > 0, after created, \ - the length of children is `node_capacity`. - - For a node with level L0, it is a leaf node. - - Args: - - level: the node level. - - data: the data hanging on the node. Optional. - - tag: a tag for user to identify. Optional. - """ - self._level = level - self._children: List["RoutingNode"] = [] - self.item = data - self.tag = tag - - self._status = NodeStatus.ALL_EMPTY - - def add_item(self, data: Any) -> None: - """Add data to its item. Only used for L0-level node.""" - self.item = data - self._status = NodeStatus.OCCUPIED - - def add_child(self, child: "RoutingNode") -> bool: - if self.level == Level.L0: - # L0-level node cannot add child. - raise AttributeError(f"L0-level node cannot add child") - - if self.level - child.level != 1: - raise AttributeError( - f"The node with level {child.level} can not be a child" - ) - - if self.is_full(): - return False - - self._children.append(child) - - return True - - def get_avail_child(self, method: str = "nearest") -> Optional["RoutingNode"]: - if self.is_children_all_status(NodeStatus.OCCUPIED): - return None - - for child in self.children: - if child.status != NodeStatus.OCCUPIED: - return child - - def find_node_by_path(self, path: Sequence[Direction]) -> "RoutingNode": - """Find node by the path of `Direction`. - - Description: - Find by starting at this level based on the path provided. 
\ - Take `path[0]` each time and then do a recursive search. - - NOTE: The length of path <= the level of this node. - """ - if len(path) == 0: - return self - - if len(path) > self.level: - raise ValueError( - f"The length of path {len(path)} > level of node {self.level}" - ) - - idx = path[0].to_index() - if idx > len(self.children) - 1: - raise IndexError(f"Index out of range: {idx} > {len(self.children) - 1}") - - sub_node = self.children[idx] - - if len(path) > 1: - return sub_node.find_node_by_path(path[1:]) - else: - return sub_node - - def find_node_by_tag(self, tag: str) -> Optional["RoutingNode"]: - """Searches for nodes by tag using DFS. - - Args: - - tag: the tag string. - - Returns: - - the node if found. Otherwise return `None`. - """ - - def dfs_preorder(root: RoutingNode, tag: str) -> Optional[RoutingNode]: - if root.tag == tag: - return root - elif root.level == Level.L0: - return None - else: - for child in root.children: - node = dfs_preorder(child, tag) - if node: - return node - - return dfs_preorder(self, tag) - - def get_node_path(self, node: "RoutingNode") -> List[Direction]: - """Return a direction path from L4 to the level of `node`. - - Args: - - node: the node with level <= `self.level`. - - Return: - - A list of `Direction` from L4 to L0. - """ - if node.level > self.level: - raise ValueError(f"The node with level {node.level} is not in self") - - if node.level == self.level: - if node != self: - raise ValueError(f"The node with level {node.level} is not in self") - - return [] - - path = [] - - def dfs_preorder(root: RoutingNode) -> bool: - i = 0 - for child in root.children: - path.append(DirectionIdx[i]) - if child is node: - return True - else: - if dfs_preorder(child): - return True - else: - path.pop(-1) - - i += 1 - - return False - - if dfs_preorder(self): - return path - else: - raise ValueError(f"The node with level {node.level} is not in self") - - def get_lx_nodes(self, lx: Level, method: str = "nearest") -> List["RoutingNode"]: - if lx > self.level: - raise ValueError(f"The node with level {lx} is not in self") - - if lx == self.level: - return [self] - - nodes = [] - - def dfs_preorder(root: RoutingNode, lx: Level, method: str = "nearest") -> None: - if root.level == lx + 1: - nodes.extend(root.children) - return - - for child in root.children: - dfs_preorder(child, lx, method) - - dfs_preorder(self, lx, method) - - return nodes - - def _find_lx_node_with_n_child_avail( - self, lx: Level, n_child_avail: int, method: str = "nearest" - ) -> Optional["RoutingNode"]: - """Find the child of level `lx` with at least \ - `n_child_avail` children available. 
- """ - if lx > self.level: - raise ValueError(f"The node with level {lx} is not in self") - - if lx == self.level: - if self.n_child_not_occpuied() >= n_child_avail: - return self - else: - return None - - if lx < self.level: - for child in self.children: - node = child._find_lx_node_with_n_child_avail(lx, n_child_avail, method) - if node is not None: - return node - - return None - - def _find_lx_node_all_empty( - self, lx: Level, method: str = "nearest" - ) -> Optional["RoutingNode"]: - if lx > self.level: - raise ValueError(f"The node with level {lx} is not in self") - - if lx == self.level: - if self.status == NodeStatus.ALL_EMPTY: - return self - else: - return None - - if lx < self.level: - for child in self.children: - node = child._find_lx_node_all_empty(lx, method) - if node is not None: - return node - - return None - - def find_lx_node_for_routing( - self, - lx: Level, - n_child_avail: int = 1, - method: str = "nearest", - ) -> List["RoutingNode"]: - """Find lx-level node for placing. - - Args: - - lx: the level of node to be found(lx > L0). - - n_child_avail: find the node with at least `N` free child left. - - method: nearest or by the path. The paremeter is reserved. - """ - - def _get_child_nodes( - routing_node: RoutingNode, - ) -> List["RoutingNode"]: - if n_child_avail == 4: - return routing_node.children - elif n_child_avail == 2: - not_empty = routing_node.n_child_not_empty() - if not_empty > 0: - return routing_node.children[2:] - else: - return routing_node.children[:2] - elif n_child_avail == 1: - avail_child = routing_node.get_avail_child(method) - if avail_child: - return [avail_child] - else: - # TODO Hard to describe - raise NotSupportedError - - return [] - - if lx > self.level: - raise ValueError(f"The node with level {lx} is not in self") - - if lx == self.level: - node = self._find_lx_node_with_n_child_avail(lx, n_child_avail, method) - if node is not None: - return _get_child_nodes(node) - else: - for child in self.children: - # Find the Lx-level node with `n_child_avail` Lx-1-level children. - lx_node = child._find_lx_node_with_n_child_avail( - lx, n_child_avail, method - ) - if lx_node is not None: - return _get_child_nodes(lx_node) - - return [] - - def add_item_to_L0_node(self, data: Any, method: str = "nearest") -> bool: - """Add item to the nearest available L0-level node.""" - if self.level == Level.L0: - self.add_item(data) - return True - - # Find the nearest available L1-level node. - L1_node = self._find_lx_node_with_n_child_avail(Level.L1, 1) - - if L1_node is None: - # No available L1-level node found. - return False - - # Find the nearest available L0-level node. 
- L0_node = L1_node.get_avail_child(method) - if L0_node is None: - return False - - L0_node.add_item(data) - return True - - def n_child_occupied(self) -> int: - """Get #N of occpuied children.""" - return sum(child.status == NodeStatus.OCCUPIED for child in self.children) - - def n_child_not_occpuied(self) -> int: - return self.node_capacity - self.n_child_occupied() - - def n_child_empty(self) -> int: - """Get #N of empty children.""" - return sum(child.status == NodeStatus.ALL_EMPTY for child in self.children) - - def n_child_not_empty(self) -> int: - return self.node_capacity - self.n_child_empty() - - def is_full(self) -> bool: - return len(self.children) == self.node_capacity - - def is_empty(self) -> bool: - return len(self.children) == 0 - - def is_children_all_status(self, status: NodeStatus) -> bool: - return all(child.status == status for child in self.children) - - def is_sub_node_all_status(self, status: NodeStatus) -> bool: - if self.level == Level.L1: - return self.is_children_all_status(status) - - for child in self.children: - if not child.is_sub_node_all_status(status): - return False - - return True - - def __getitem__(self, index: int) -> "RoutingNode": - return self.children[index] - - def __contains__(self, item: "RoutingNode") -> bool: - return item in self.children - - @property - def level(self) -> Level: - return self._level - - @property - def node_capacity(self) -> int: - return 4 if self.level > Level.L0 else 0 - - @property - def children(self) -> List["RoutingNode"]: - return self._children - - @property - def status(self) -> NodeStatus: - return self._status - - @status.setter - def status(self, new_status: NodeStatus) -> None: - self._status = new_status - - def node_status_update(self, method: str = "nearest") -> None: - """Update the status of the node and its children \ - of all levels(from `self.level` to L1). - """ - - def dfs_preorder(root: RoutingNode, method: str) -> None: - if root.level > Level.L1: - for child in root.children: - dfs_preorder(child, method) - - root._status_update() - - if self.level > Level.L0: - dfs_preorder(self, method) - - def _status_update(self) -> None: - """Update the status of the node.""" - if self.is_sub_node_all_status(NodeStatus.OCCUPIED): - self._status = NodeStatus.OCCUPIED - elif self.is_sub_node_all_status(NodeStatus.ALL_EMPTY): - self._status = NodeStatus.ALL_EMPTY - else: - self._status = NodeStatus.AVAILABLE - - -@final -class RoutingRoot(RoutingNode): - def __init__(self, empty_root: bool = False, **kwargs) -> None: - """Initialize a routing quadtree root. \ - The level of the root is L5. - - Args: - empty_root: whether to create a empty root. Default is false. - """ - super().__init__(Level.L5, **kwargs) - - if not empty_root: - for i in range(self.node_capacity): - L4_child = create_lx_full_tree(Level.L4, f"L4_{i}") - self.add_child(L4_child) - - def insert_gsyn_on_core(self, *cb_on_core: CorePlacement) -> None: - """Insert the grouped synapse on core into the tree. - - Steps: - - 1. Get the routing node consumption. - - 2. Based on the routing level, find the available node of the routing level. - """ - n_core_total = len(cb_on_core) - - cost = get_node_consumption(n_core_total) - level, next_n = cost.get_routing_level() - - # Find L2-level node with at least 2 L1 children available. 
- routing_node = self.find_lx_node_for_routing(level, next_n) - if routing_node is None: - raise ValueError - - for gsyn_on_core in cb_on_core: - leaf = RoutingNode( - Level.L0, gsyn_on_core, tag=f"leaf of {gsyn_on_core.name}" - ) - - -def create_lx_full_tree(lx: Level, root_tag: Optional[str] = None) -> RoutingNode: - """Create a full Lx-level routing tree. - - If creating a L4 routing tree, it will return: - L4 with #N children - -> L3 with #N children - -> L2 with #N children - -> L1 with #N children - -> L0 with no child - - where #N is `node_capacity`. - """ - root = RoutingNode(lx, tag=root_tag) - - if lx > Level.L0: - for i in range(root.node_capacity): - child = create_lx_full_tree(Level(lx - 1), f"L{lx-1}_{i}") - root.add_child(child) - - return root - - -def get_parent(tree: RoutingNode, node: RoutingNode) -> Optional[RoutingNode]: - """Get the parent node of the given node. \ - If not found, return None. - """ - - def dfs_preorder(tree, node) -> Optional[RoutingNode]: - if tree is node: - return None - - for child in tree.children: - if child is node: - return tree - else: - return dfs_preorder(child, node) - - return dfs_preorder(tree, node) diff --git a/paibox/backend/graphs.py b/paibox/backend/graphs.py index fc2269fd..54a6da71 100644 --- a/paibox/backend/graphs.py +++ b/paibox/backend/graphs.py @@ -98,7 +98,7 @@ def build( _edges: Collector[EdgeName, EdgeType] = Collector() for network in networks: - sub_nodes = network.nodes(level=1, include_self=False) + sub_nodes = network.nodes(include_self=False, find_recursive=True) _nodes += sub_nodes.include(InputProj, NeuDyn).unique() _edges += sub_nodes.subset(SynSys).unique() @@ -176,7 +176,7 @@ def _graph_supported_check(self) -> None: for onode in self.onodes.values() ): raise NotSupportedError( - f"Only output nodes with no more than {HwConfig.N_FANIN_PER_DENDRITE_MAX} " + f"only output nodes with no more than {HwConfig.N_FANIN_PER_DENDRITE_MAX} " f"neurons are supported." ) @@ -190,25 +190,30 @@ def _node_pos(self, node: NodeName) -> NodePosition: def build_check(self) -> None: if not self.has_built: - raise BuildError(f"The graph hasn't been built yet.") + raise BuildError(f"the graph hasn't been built yet.") - def group_edges(self) -> List[FrozenSet[EdgeType]]: + def group_edges(self) -> Tuple[List[FrozenSet[EdgeType]], List[int]]: """Group all edges according to a certain rule. - Return: a list of set of grouped edges. + Return: a list of set of grouped edges and a list of routing groups id. """ self.build_check() + rgid = 0 # routing group id + routing_groups_id: List[int] = [] gathered: List[FrozenSet[EdgeType]] = [] seen_edges: Set[EdgeType] = set() # Check if all edges are traversed for node in self.ordered_nodes: + """Process the predecessor nodes of nodes first, then process the successor nodes.""" if self.degree_of_nodes[node].in_degree > 1: edge_group = self._find_pred_edges(self.succ_dg, node) # Get the edges traversed for the first time comming_edges = edge_group.difference(seen_edges) seen_edges.update(comming_edges) + routing_groups_id.append(rgid) + rgid += 1 gathered.append(frozenset(comming_edges)) if self.degree_of_nodes[node].out_degree > 1: @@ -224,6 +229,7 @@ def group_edges(self) -> List[FrozenSet[EdgeType]]: succ_edges_sg = frozenset([succ_edges[i] for i in idx]) if succ_edges_sg not in gathered: seen_edges.update(succ_edges_sg) + routing_groups_id.append(rgid) gathered.append(succ_edges_sg) else: # FIXME Will this happen? 
@@ -232,16 +238,21 @@ def group_edges(self) -> List[FrozenSet[EdgeType]]: succ_edges_sg = frozenset(succ_edges) if succ_edges_sg not in gathered: seen_edges.update(succ_edges_sg) + routing_groups_id.append(rgid) gathered.append(succ_edges_sg) else: # FIXME Will this happen? raise NotSupportedError + rgid += 1 + elif self.degree_of_nodes[node].out_degree == 1: succ_node = list(self.succ_dg[node].keys())[0] # Check the in-degree of the only following node. if self.degree_of_nodes[succ_node].in_degree == 1: gathered.append(frozenset({self.succ_dg[node][succ_node].edge})) + routing_groups_id.append(rgid) + rgid += 1 else: # This edge is waiting to be processed when # traversing the following node `succ_node`. @@ -250,7 +261,7 @@ def group_edges(self) -> List[FrozenSet[EdgeType]]: # out-degree = 0, do nothing. continue - return gathered + return gathered, routing_groups_id @staticmethod def _find_pred_edges( @@ -273,7 +284,7 @@ def inherent_timestep(self) -> int: @property def graph_name_repr(self) -> str: - _str = f"Graph_of_{self.networks[0].name}" + _str = f"graph_of_{self.networks[0].name}" for network in self.networks[1:]: _str += f"_and_{network.name}" @@ -286,34 +297,55 @@ def _degree_check( ) -> None: """Filter out such network structure, which is currently not supported.""" for node in filter(lambda node: degree_of_nodes[node].out_degree > 1, succ_dg): - if any(degree_of_nodes[succ_node].in_degree > 1 for succ_node in succ_dg[node]): - raise NotSupportedError( - "If out-degree of a node is greater than 1, " - "the in-degree of its sucessors must be 1." - ) + for succ_node in succ_dg[node]: + if degree_of_nodes[succ_node].in_degree > 1: + _node_repr = ( + succ_node.name if isinstance(succ_node, CoreBlock) else succ_node + ) + raise NotSupportedError( + f"If out-degree of a node is greater than 1, the in-degree of its sucessors must be 1. " + f"However, in-degree of {_node_repr} is {degree_of_nodes[succ_node].in_degree}." + ) def convert2routing_groups( succ_dg_of_cb: Dict[CoreBlock, List[CoreBlock]], degrees_of_cb: Dict[CoreBlock, NodeDegree], + input_core_blocks: Dict[SourceNodeType, List[CoreBlock]], ) -> List[RoutingGroup]: ordered_core_blocks = toposort(succ_dg_of_cb) seen_cb = set() routing_groups = [] + succ_cb_gid_dict = defaultdict(list) _degree_check(degrees_of_cb, succ_dg_of_cb) + # After that, all input core blocks have been traversed. + for input_cbs in input_core_blocks.values(): + seen_cb.update(input_cbs) + routing_groups.append(RoutingGroup(*input_cbs)) + for cb in ordered_core_blocks: - # Check whether it has been traversed + # Check whether the core block has been traversed. This judgment condition is for + # core blocks with out-degree = 1 & output core blocks (out-degree = 0). if cb not in seen_cb: seen_cb.add(cb) routing_groups.append(RoutingGroup(cb)) - # If the out-degree > 1, treat the following core blocks as one routing group. + # If out-degree > 1, group successor core blocks according to their routing id. 
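        # Successor core blocks sharing the same `_routing_id` (ids are assigned when
        # their edges are grouped in `group_edges`) are collected into one `RoutingGroup`.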
if degrees_of_cb[cb].out_degree > 1: succ_cbs = succ_dg_of_cb[cb] seen_cb.update(succ_cbs) - routing_groups.append(RoutingGroup(*succ_cbs)) + + succ_cb_gid_dict.clear() + for succ_cb in succ_cbs: + if succ_cb._routing_id in succ_cb_gid_dict: + succ_cb_gid_dict[succ_cb._routing_id].append(succ_cb) + else: + succ_cb_gid_dict[succ_cb._routing_id] = [succ_cb] + + for succ_cb in succ_cb_gid_dict.values(): + routing_groups.append(RoutingGroup(*succ_cb)) return routing_groups @@ -372,7 +404,7 @@ def toposort(directed_edges: Mapping[_NT, Iterable[_NT]]) -> List[_NT]: vertices.add(m) if any(incoming_edges.get(v, None) for v in directed_edges): - raise NotSupportedError("The graph with cycles is not supported yet.") + raise NotSupportedError("the graph with cycles is not supported.") return ordered @@ -468,7 +500,7 @@ def get_longest_path( Return: A tuple containing the longest path in the graph and its distance. """ - distances: Dict[_NT, int] = defaultdict(int) # init value = 0 + distances: Dict[_NT, int] = {node: 0 for node in ordered_nodes} pred_nodes: Dict[_NT, _NT] = dict() for node in ordered_nodes: @@ -487,6 +519,7 @@ def get_longest_path( distance = distances[node] path = [node] + # Construct the longest path by following the predecessors while path[-1] in pred_nodes: path.append(pred_nodes[path[-1]]) @@ -515,9 +548,13 @@ def get_shortest_path( distances: Dict[_NT, int] = defaultdict(lambda: MAX_DISTANCE) pred_nodes: Dict[_NT, _NT] = dict() - # Set initial value for all inputs nodes. - for inode in input_nodes: - distances[inode] = 0 + # Set initial value for all inputs nodes. If there is no input node, + # the first node after topological sorting will be used as the starting node. + if input_nodes: + for inode in input_nodes: + distances[inode] = 0 + else: + distances[ordered_nodes[0]] = 0 for node in ordered_nodes: for neighbor, edge_attr in edges_with_d[node].items(): @@ -535,6 +572,7 @@ def get_shortest_path( distance = distances[node] path = [node] + # Construct the shortest path by following the predecessors while path[-1] in pred_nodes: path.append(pred_nodes[path[-1]]) diff --git a/paibox/backend/mapper.py b/paibox/backend/mapper.py index de171248..d39798d8 100644 --- a/paibox/backend/mapper.py +++ b/paibox/backend/mapper.py @@ -1,3 +1,4 @@ +import sys from collections import defaultdict from copy import copy from pathlib import Path @@ -12,7 +13,7 @@ to_coord, ) -from paibox.base import NeuDyn +from paibox.base import NeuDyn, SynSys from paibox.exceptions import ResourceError from paibox.network import DynSysGroup @@ -30,7 +31,7 @@ ) from .context import _BACKEND_CONTEXT, set_cflag from .graphs import PAIGraph, convert2routing_groups, get_node_degrees -from .graphs_types import NodeDegree +from .graphs_types import NodeDegree, SourceNodeType from .placement import CoreBlock, aligned_coords, max_lcn_of_cb from .routing import RoutingGroup, RoutingRoot @@ -50,32 +51,23 @@ class Mapper: def __init__(self) -> None: self.core_blocks: List[CoreBlock] = [] - """A list for core blocks in topological order.""" - + """List for core blocks in the network.""" self.succ_core_blocks: Dict[CoreBlock, List[CoreBlock]] = defaultdict(list) - """Grouped post-synapses of nodes. 
Structure: - { - node1.name: { - post-node1.name: grouped post-syn1, - post-node2.name: grouped post-syn2 - } - } - """ - self.degrees_of_cb: Dict[CoreBlock, NodeDegree] = defaultdict(NodeDegree) + self.input_core_blocks: Dict[SourceNodeType, List[CoreBlock]] = defaultdict( + list + ) + """List of input core blocks for each input node.""" - self.core_params: Dict[Coord, CoreConfig] = dict() - """The dictionary of core parameters. Structure: - { - address of core: { - parameters... - } - } - """ + self.degrees_of_cb: Dict[CoreBlock, NodeDegree] = defaultdict(NodeDegree) self.routing_groups: List[RoutingGroup] = [] self.core_plm_config: CorePlacementInfo = dict() + self.core_params: Dict[Coord, CoreConfig] = dict() + """The dictionary of core parameters.""" + self.graph_info: GraphInfo self.n_core_required = 0 + self.n_core_occupied = 0 self.clear() @@ -85,10 +77,14 @@ def clear(self) -> None: self.core_blocks.clear() self.succ_core_blocks.clear() + self.input_core_blocks.clear() self.core_params.clear() self.core_plm_config.clear() + self.n_core_required = 0 + self.n_core_occupied = 0 + # Set default cflags _BACKEND_CONTEXT.cflags.clear() set_cflag(enable_wp_opt=True) @@ -132,7 +128,7 @@ def compile( occupied cores, or both. The default is specified by the corresponding compilation option in the\ backend configuration item (`both` by default). - Return: compiled network information in dictionary form. + Return: network information after compilation in dictionary format. """ if weight_bit_optimization is not None: set_cflag(enable_wp_opt=weight_bit_optimization) @@ -140,22 +136,25 @@ def compile( if grouping_optim_target is not None: set_cflag(grouping_optim_target=grouping_optim_target) - """Backend compilation.""" + """1. Check whether the PAIGraph has built.""" self._build_check() - """1. Build core blocks.""" + """2. Set global compilation flags.""" + self._set_global_cflags() + + """3. Build core blocks.""" self.build_core_blocks() - """2. Adjust the LCN extension of each core block.""" + """4. Adjust the LCN extension of each core block.""" self.lcn_ex_adjustment() - """3. Core coordinate assignment.""" + """5. Core coordinate assignment.""" self.coord_assign() - """4. Allocate the core blocks to the `CorePlacement`.""" + """6. Allocate the core blocks to the `CorePlacement`.""" self.core_allocation() - """5. Export parameters.""" + """7. Export parameters.""" return self.config_export() def build_core_blocks(self) -> None: @@ -163,16 +162,22 @@ def build_core_blocks(self) -> None: Description: Group all edges & build `CoreBlock` based on the grouped edges. 
""" - grouped_edges = self.graph.group_edges() - - for syns in grouped_edges: - self.core_blocks.append( - CoreBlock.build( - *syns, - seed=0, - enable_wp_opt=_BACKEND_CONTEXT.cflags["enable_wp_opt"], + grouped_edges, routing_groups_id = self.graph.group_edges() + + if sys.version_info >= (3, 10): + for syns, routing_id in zip(grouped_edges, routing_groups_id, strict=True): + self.core_blocks.append( + CoreBlock.build(*syns, seed=0, routing_id=routing_id) ) - ) + else: + if len(grouped_edges) != len(routing_groups_id): + raise ValueError( + f"the length of grouped edges & routing groups id are not equal, " + f"{len(grouped_edges)} != {len(routing_groups_id)}" + ) + + for syns, routing_id in zip(grouped_edges, routing_groups_id): + self.core_blocks.append(CoreBlock.build(*syns, routing_id=routing_id)) for cb in self.core_blocks: succ_cbs = list( @@ -183,10 +188,28 @@ def build_core_blocks(self) -> None: ) self.succ_core_blocks[cb].extend(succ_cbs) + for inode in self.graph.inodes.values(): + # TODO How to prevent this situation: there is input node & predecessor nodes + # in a certain core blocks. + + # Disconnected input nodes will not be recorded. + succ_cb = [cb for cb in self.core_blocks if inode in cb.source] + if len(succ_cb) > 0: + self.input_core_blocks[inode] = succ_cb + self.degrees_of_cb = get_node_degrees(self.succ_core_blocks) def lcn_ex_adjustment(self) -> None: """Adjust the LCN extension of each core block.""" + # In the absence of the above complex situations, the following judgment is useless. + # But it'd be better to add this lcn adjustment. + for input_cbs in self.input_core_blocks.values(): + if len(input_cbs) > 1: + max_lcn_ex = max_lcn_of_cb(input_cbs) + # Adjust the `lcn_ex` of the input core blocks for each input node + for g in input_cbs: + g.lcn_ex = max_lcn_ex + for cb in self.core_blocks: succ_cb = self.succ_core_blocks[cb] @@ -210,9 +233,8 @@ def lcn_ex_adjustment(self) -> None: def coord_assign(self) -> None: """Assign the coordinate of each `CorePlacement`. - NOTE: The neurons in each core block must be grouped first \ - to determine the #N of cores required, and then the \ - routing coordinates can be assigned. + NOTE: The neurons in each core block must be grouped first to determine the #N of cores required, \ + and then the routing coordinates can be assigned. """ for cb in self.core_blocks: # Group the neurons, get the #N of cores required. @@ -220,34 +242,42 @@ def coord_assign(self) -> None: optim_target=_BACKEND_CONTEXT.cflags["grouping_optim_target"] ) - # Calculate the consumption of physical cores required. + # Calculate the consumption of required physical cores. if ( n_core_required := sum(cb.n_core_required for cb in self.core_blocks) ) > HwConfig.N_CORE_OFFLINE: raise ResourceError( - f"#N of total cores required out of {HwConfig.N_CORE_OFFLINE} ({n_core_required})." + f"the number of required cores is out of range {HwConfig.N_CORE_OFFLINE} ({n_core_required})." ) self.n_core_required = n_core_required # Generate routing groups by given the list of core blocks. routing_groups = convert2routing_groups( - self.succ_core_blocks, self.degrees_of_cb + self.succ_core_blocks, self.degrees_of_cb, self.input_core_blocks ) for rg in routing_groups: if not self.routing_tree.insert_routing_group(rg): raise RuntimeError( - f"Insert routing group {rg} into the routing tree failed." + f"insert routing group {rg} into the routing tree failed." ) self.routing_groups = routing_groups + # Calculate the consumption of occupied physical cores. 
+ if ( + n_core_occupied := sum(rg.get_n_core_occupied() for rg in routing_groups) + ) > HwConfig.N_CORE_OFFLINE: + raise ResourceError( + f"the number of occupied cores is out of range {HwConfig.N_CORE_OFFLINE} ({n_core_occupied})." + ) + + self.n_core_occupied = n_core_occupied + def core_allocation(self) -> None: - """Allocate the core blocks to the physical cores. \ - The order of `core_plms` is the same as `core_blocks`. - """ - for cb in self.core_blocks: - cb.core_plm_alloc() + """Allocate the routing groups to core placements level.""" + for rg in self.routing_groups: + rg.core_block_alloc() def config_export(self) -> GraphInfo: """Export parameters of cores & neurons inside. @@ -266,7 +296,7 @@ def config_export(self) -> GraphInfo: members=self.core_plm_config, # The configuration of physical cores is in `core_plm_config` inherent_timestep=self.graph.inherent_timestep, n_core_required=self.n_core_required, - # n_core_occupied=self.n_core_occupied, + n_core_occupied=self.n_core_occupied, extras={"name": self.graph.graph_name_repr}, ) @@ -274,6 +304,9 @@ def config_export(self) -> GraphInfo: return _graph_info + def _set_global_cflags(self) -> None: + SynSys.CFLAG_ENABLE_WP_OPTIMIZATION = _BACKEND_CONTEXT.cflags["enable_wp_opt"] + def _inpproj_config_export(self) -> InputNodeInfo: """Export the configuration of input projections. @@ -294,48 +327,46 @@ def _inpproj_config_export(self) -> InputNodeInfo: """ input_nodes_info = dict() - # Traverse input core blocks where input nodes are - for input_cb in filter( - lambda cb: any(s for s in cb.source if s in self.graph.inodes.values()), - self.core_blocks, - ): - dest_coords = input_cb.core_coords + # Traverse input core blocks + for inode, input_cbs in self.input_core_blocks.items(): + dest_coords: List[Coord] = [] + for cb in input_cbs: # Do not use iterative generation. + dest_coords.extend(cb.core_coords) + dest_rid = get_replication_id(dest_coords) - # Traverse input nodes in the input core block only, in case that - # "other source nodes" are grouped with the input nodes. - for inode in filter( - lambda s: s in self.graph.inodes.values(), input_cb.source - ): - axon_coords = aligned_coords( - slice(0, input_cb.n_axon_of(input_cb.source.index(inode)), 1), - input_cb.axon_segments[inode], - 1, - input_cb.n_timeslot, - ) + # The arrangement of axons is the same for the rest of `input_cbs`. + # LCN of `input_cbs` are the same. + input_cb = input_cbs[0] + axon_coords = aligned_coords( + slice(0, input_cb.n_axon_of(input_cb.source.index(inode)), 1), + input_cb.axon_segments[inode], + 1, + input_cb.n_timeslot, + ) - neuron_dest = NeuronDest( - [coord.tick_relative for coord in axon_coords], - [coord.addr_axon for coord in axon_coords], - dest_coords[0].x, - dest_coords[0].y, - dest_rid.x, - dest_rid.y, - _BACKEND_CONTEXT["local_chip_addr"].x, - _BACKEND_CONTEXT["local_chip_addr"].y, - ) + neuron_dest = NeuronDest( + [coord.tick_relative for coord in axon_coords], + [coord.addr_axon for coord in axon_coords], + dest_coords[0].x, + dest_coords[0].y, + dest_rid.x, + dest_rid.y, + _BACKEND_CONTEXT["local_chip_addr"].x, + _BACKEND_CONTEXT["local_chip_addr"].y, + ) - input_nodes_info[inode.name] = neuron_dest + input_nodes_info[inode.name] = neuron_dest return input_nodes_info def _member_cb_and_onode_config_export(self) -> OutputDestInfo: - """Export the configuration of member core blocks & output destinations. + """Export configuration & output destinations inormation for core blocks. 
Description: Traverse core placements in core blocks, find the following core \ blocks where the axons at. Get the coordinate of the core placement \ - & coordinates of axons(for broadcasting). + & coordinates of axons(for multicasting). Json exchange file format for output nodes: { @@ -359,44 +390,118 @@ def _member_cb_and_onode_config_export(self) -> OutputDestInfo: # Shallow copy ocoord = copy(_BACKEND_CONTEXT["output_core_addr_start"]) - for member_cb in self.core_blocks: - self.core_params.update( - CoreBlock.export_core_plm_config(member_cb) - ) # compatible for py3.8 - - output_axon_offset = 0 - for core_plm in member_cb.core_placements.values(): - for neu_seg in core_plm.neu_segs_of_cplm: - # Find the axon destinations - dest_cb = [ - cb for cb in self.core_blocks if neu_seg.parent in cb.source - ] - - # FIXME It is necessary to ensure that when there are both output nodes - # & member nodes in the same `CoreBlock`, they need to be allocated on - # different physical cores, otherwise routing problem will occur. - if dest_cb: # `neu_seg` is memeber neurons - # Should not happen - assert _cb_routable(self.routing_groups, dest_cb) - core_plm.export_neu_config(neu_seg, dest_cb) + for rg in self.routing_groups: + for member_cb in rg: + self.core_params.update( + CoreBlock.export_core_plm_config(member_cb) + ) # compatible for py3.8 + + if self.degrees_of_cb[member_cb].out_degree == 0: + # member_cb is a pure output core block. All neu_segs inside are output neurons. + ocoord = self._onode_cb_config_export( + member_cb, output_dest_info, ocoord + ) + elif any(d in self.graph.onodes.values() for d in member_cb.dest): + # member_cb is both a member & output core block. + ocoord = self._member_onode_cb_config_export( + member_cb, output_dest_info, ocoord + ) + else: + # member_cb is a pure member. + self._member_cb_config_export(member_cb) + + for coord, core_plm in member_cb.core_placements.items(): + self.core_plm_config[coord] = core_plm.export_core_plm_config() + + # Generate default configurations for wasted core placements of the routing group + self.core_plm_config.update(rg.get_wasted_cplm_config()) + + return output_dest_info + + def _member_cb_config_export(self, member_cb: CoreBlock) -> None: + """Export configuration information for core blocks that are pure members.""" + succ_cbs = self.succ_core_blocks[member_cb] + + for core_plm in member_cb.core_placements.values(): + for neu_seg in core_plm.neu_segs_of_cplm: + # Find the axon destinations of neu_seg, not the successor core blocks. 
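+                # (i.e. only those successor core blocks that actually take `neu_seg.parent` as a source)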
+ dest_cb_of_nseg = [cb for cb in succ_cbs if neu_seg.parent in cb.source] + + assert _cb_routable(self.routing_groups, dest_cb_of_nseg) + core_plm.export_neu_config(neu_seg, dest_cb_of_nseg) + + def _member_onode_cb_config_export( + self, + member_onode_cb: CoreBlock, + output_dest_info: OutputDestInfo, + ocoord: Coord, + ) -> Coord: + """Export configuration information for core blocks that are both members & output.""" + cur_ocoord = ocoord + output_axon_offset = 0 + o_nodes = [d for d in member_onode_cb.dest if d in self.graph.onodes.values()] + succ_cbs = self.succ_core_blocks[member_onode_cb] + + for core_plm in member_onode_cb.core_placements.values(): + for neu_seg in core_plm.neu_segs_of_cplm: + dest_cb_of_nseg = [cb for cb in succ_cbs if neu_seg.parent in cb.source] + + if len(dest_cb_of_nseg) > 0: + assert _cb_routable(self.routing_groups, dest_cb_of_nseg) + core_plm.export_neu_config(neu_seg, dest_cb_of_nseg) + else: + offset_idx = o_nodes.index(neu_seg.parent) + + if hasattr(CoordOffset, "from_offset"): + # For paicorelib > 0.0.13 + raise NotImplementedError else: - # `neu_seg` is output neurons. Every neuron segment is a output node. - # Update the offset of axon - output_axon_offset = core_plm.export_neu_config( - neu_seg, - output_core_coord=ocoord, - axon_addr_offset=output_axon_offset, + # For paicorelib <= 0.0.13 + cur_ocoord = ocoord + CoordOffset( + offset_idx % 32, offset_idx // 32 ) - output_dest_info[neu_seg.parent.name][ - core_plm.coord.address - ] = core_plm.neu_configs[neu_seg.parent].neuron_dest_info - - # Coord.x += 1 for the destination of the next output node - ocoord += CoordOffset(1, 0) - self.core_plm_config[core_plm.coord] = core_plm.export_core_plm_config() + output_axon_offset = core_plm.export_neu_config( + neu_seg, + output_core_coord=cur_ocoord, + axon_addr_offset=output_axon_offset, + ) + output_dest_info[neu_seg.parent.name][core_plm.coord.address] = ( + core_plm.neu_configs[neu_seg.parent].neuron_dest_info + ) + + return cur_ocoord + + def _onode_cb_config_export( + self, onode_cb: CoreBlock, output_dest_info: OutputDestInfo, ocoord: Coord + ) -> Coord: + """Export configuration information for core blocks that are pure output.""" + cur_ocoord = ocoord + output_axon_offset = 0 + o_nodes = [d for d in onode_cb.dest if d in self.graph.onodes.values()] + + for core_plm in onode_cb.core_placements.values(): + for neu_seg in core_plm.neu_segs_of_cplm: + # Get the output coordinate of this neu_seg + offset_idx = o_nodes.index(neu_seg.parent) + + if hasattr(CoordOffset, "from_offset"): + # For paicorelib > 0.0.13 + raise NotImplementedError + else: + # For paicorelib <= 0.0.13 + cur_ocoord = ocoord + CoordOffset(offset_idx % 32, offset_idx // 32) + + output_axon_offset = core_plm.export_neu_config( + neu_seg, + output_core_coord=cur_ocoord, + axon_addr_offset=output_axon_offset, + ) + output_dest_info[neu_seg.parent.name][core_plm.coord.address] = ( + core_plm.neu_configs[neu_seg.parent].neuron_dest_info + ) - return output_dest_info + return cur_ocoord def export( self, @@ -413,17 +518,16 @@ def export( Args: - write_to_file: whether to write frames into file. - fp: If `write_to_file` is `True`, specify the output path. - - format: `txt`, `bin`, or `npy`.`bin` & `npy` are recommended. - - split_by_coordinate: whether to split the generated frames file by the \ - core coordinates. - - local_chip_addr: the address of the local chip. If not specified, the \ - default value in `_BACKEND_CONTEXT` will be used. + - format: `txt`, `bin`, or `npy`. 
`bin` is recommended. + - split_by_coordinate: whether to split the generated frames file by the core coordinates. + - local_chip_addr: the address of the local chip. If not specified, the default value in \ + `_BACKEND_CONTEXT` will be used. - export_core_params: whether to export the parameters of occupied cores. Return: a dictionary of configurations. """ if format not in ("bin", "npy", "txt"): - raise ValueError(f"Format {format} is not supported.") + raise ValueError(f"format {format} is not supported.") _fp = _fp_check(fp) @@ -459,7 +563,7 @@ def find_neuron(self, neuron: NeuDyn, *, verbose: int = 0) -> None: # Find neuron in one or more core blocks. if neuron in cb.dest: print( - f"Neurons {neuron.name} placed in {cb.name}, LCN_{1 << cb.lcn_ex}X" + f"neurons {neuron.name} placed in {cb.name}, LCN_{1 << cb.lcn_ex}X" ) for core_plm in cb.core_placements.values(): for neu_seg in core_plm.neu_segs_of_cplm: @@ -476,7 +580,7 @@ def find_axon(self, neuron: NeuDyn, *, verbose: int = 0) -> None: for cb in self.core_blocks: # Find neuron in one or more core blocks. if neuron in cb.source: - print(f"Axons {neuron.name} placed in {cb.name}, LCN_{1 << cb.lcn_ex}X") + print(f"axons {neuron.name} placed in {cb.name}, LCN_{1 << cb.lcn_ex}X") axon_segment = cb.axon_segments[neuron] print( f"{neuron.name} placed in {cb.core_coords}\n" diff --git a/paibox/backend/placement.py b/paibox/backend/placement.py index b2908d97..5df29497 100644 --- a/paibox/backend/placement.py +++ b/paibox/backend/placement.py @@ -26,12 +26,17 @@ from paicorelib import WeightPrecision as WP from paibox.base import NeuDyn, PAIBoxObject -from paibox.exceptions import BuildError, NotSupportedError, ResourceError +from paibox.exceptions import BuildError, ResourceError, TruncationWarning from paibox.synapses import SynSys from paibox.types import WeightType from paibox.utils import check_attr_same, count_unique_elem -from .conf_template import CoreConfig, CorePlacementConfig, NeuronConfig +from .conf_template import ( + CoreConfig, + CorePlacementConfig, + EmptyCorePlacementConfig, + NeuronConfig, +) from .context import _BACKEND_CONTEXT from .graphs_types import DestNodeType, SourceNodeType from .segment_utils import ( @@ -47,14 +52,6 @@ class CoreAbstract(HwCore, PAIBoxObject): - SUPPORTED_WP: ClassVar[Tuple[WP, ...]] = ( - WP.WEIGHT_WIDTH_1BIT, - WP.WEIGHT_WIDTH_2BIT, # Not verified - WP.WEIGHT_WIDTH_4BIT, # Not verified - WP.WEIGHT_WIDTH_8BIT, - ) - """Supported weight precision.""" - SUPPORTED_MODE: ClassVar[Tuple[CoreMode, ...]] = (CoreMode.MODE_SNN,) """Supported core modes.""" @@ -65,24 +62,21 @@ class CoreBlock(CoreAbstract): RUNTIME_MODE: ClassVar[CoreMode] = CoreMode.MODE_SNN def __init__( - self, - *parents: SynSys, - weight_precision: WP, - seed: int = 0, - name: Optional[str] = None, + self, *parents: SynSys, routing_id: int, seed: int, name: Optional[str] = None ) -> None: - """ - Arguments: + """Core blocks in SNN mode. + + Args: - parents: the parent synapses. - - weight_precision: the precision of weight matrix. - - seed: the random seed. - - name: the name of the core block. Optional. + - routing_id: id of routing group. + - seed: random seed. Default value is 0. + - name: name of the core block. Optional. 
""" - super().__init__(name) self._parents = parents - self._lcn_ex = n_axon2lcn_ex(self.n_axon, self.n_fanin_max) - self.weight_precision = weight_precision + self._lcn_ex = self._n_axon2lcn_ex() + self._wp = WP.WEIGHT_WIDTH_8BIT # Default value + self._routing_id = routing_id self.seed = seed """Random seed, legal integer, no more than uint64.""" @@ -131,9 +125,7 @@ def core_plm_alloc(self) -> None: for i, coord in enumerate(self.core_coords): # assert self.get_raw_weight_of_coord(i)[0].shape[0] == self.n_axon - self.core_placements[coord] = CorePlacement.build( - self, i, self.get_raw_weight_of_coord(i) - ) + self.core_placements[coord] = CorePlacement.build(self, i) def _get_syn_of(self, src: SourceNodeType, dest: DestNodeType) -> Optional[SynSys]: for syn in self.obj: @@ -142,6 +134,28 @@ def _get_syn_of(self, src: SourceNodeType, dest: DestNodeType) -> Optional[SynSy return None + def _n_axon2lcn_ex(self) -> LCN_EX: + """Convert #N(of axons) to `LCN_EX` & check. + + NOTE: LCN_EX = log2[ceil(#N/fan-in per dendrite)], where `LCN_1X` = 0. + """ + if self.n_axon < 1: + raise ValueError( + f"the number of axons must be positive, but got {self.n_axon}." + ) + + if ( + lcn := ((self.n_axon - 1) // self.n_fanin_max).bit_length() + ) > LCN_EX.LCN_64X: + _max_n_axons = self.n_fanin_max * (1 << LCN_EX.LCN_64X) + raise ResourceError( + f"required LCN extension out of range {LCN_EX.LCN_64X} ({lcn}). " + f"The number of axons must be <= {_max_n_axons}. " + f"But synapses {self._obj_repr()} have a total of {self.n_axon} axons." + ) + + return LCN_EX(lcn) + def copy(self): raise NotImplementedError @@ -196,14 +210,17 @@ def n_fanin_max(self) -> int: def n_core_required(self) -> int: return len(self.neuron_segs_of_cb) + @property + def weight_precision(self) -> WP: + # Optimized in `s.weight_precision`. + return max(s.weight_precision for s in self.obj) + @property def n_dendrite_per_neuron(self) -> int: - """Multiple dendrites will be combined to achieve higher \ - precision weights. + """Multiple dendrites will be combined to achieve higher precision weights. - FIXME The limit on the number of dendrites in SNN/ANN modes \ - is different, which affects the capacity of neurons in \ - physical core. + FIXME The limit on the number of dendrites in SNN/ANN modes is different, which affects \ + the capacity of neurons in physical core. """ return 1 << self.weight_precision @@ -220,7 +237,7 @@ def lcn_ex(self, lcn_ex: LCN_EX) -> None: """Set or adjust the `lcn_ex` & lock.""" if lcn_ex > LCN_EX.LCN_64X: raise ResourceError( - f"LCN extension required out of {LCN_EX.LCN_64X}: {lcn_ex}" + f"required LCN extension out of range {LCN_EX.LCN_64X} ({lcn_ex})." ) self._lcn_ex = lcn_ex @@ -267,8 +284,8 @@ def n_neuron(self) -> int: return sum(d.num_in for d in self.dest) @property - def unroll_factor(self) -> List[int]: - return [d.unroll_factor for d in self.dest] + def unrolling_factor(self) -> List[int]: + return [d.unrolling_factor for d in self.dest] @property def n_neuron_of_plm(self) -> List[int]: @@ -277,17 +294,16 @@ def n_neuron_of_plm(self) -> List[int]: FIXME Different in SNN/ANN RUNTIME_MODE. """ if len(self.core_coords) == 0: - raise BuildError(f"Do this after coordinates assignment.") + raise BuildError(f"do this after coordinates assignment.") # Get #N of neurons on each `CorePlacement` according to the # maximum address required of neuron segments on each `CorePlacement`. - assert [] not in self.neuron_segs_of_cb # FIXME if it never happens, remove it. 
+ assert [] not in self.neuron_segs_of_cb # TODO if it never happens, remove it. - n = [ + return [ sum(seg.n_neuron for seg in neuron_segs) for neuron_segs in self.neuron_segs_of_cb ] - return n @cached_property def raw_weight_of_dest(self) -> List[WeightType]: @@ -338,30 +354,21 @@ def __repr__(self) -> str: def __str__(self) -> str: return f"<{self.name} of target '{self.obj}'>" - @classmethod - def build(cls, *synapses: SynSys, seed: int = 0, enable_wp_opt: bool): - """Combine the SAME weight precision synapses and build the `CoreBlock`.""" - SynSys.CFLAG_ENABLE_WP_OPTIMIZATION = enable_wp_opt # TODO OK but ugly - - wp0 = synapses[0].weight_precision - # Check wether weight precision of all synapses equal. - if not all(wp0 == s.weight_precision for s in synapses): - raise NotSupportedError("Mixed weight precision is not supported yet") - - if wp0 > max(cls.SUPPORTED_WP): - raise NotSupportedError(f"{wp0.name} is not supported yet.") - - elif wp0 not in cls.SUPPORTED_WP: - # Treat lower bit weights as 8-bit weights. - wp0 = cls.SUPPORTED_WP[-1] + def _obj_repr(self) -> str: + """The representation of the names of target objects.""" + return ", ".join(n.name for n in self.obj) + @classmethod + def build(cls, *synapses: SynSys, routing_id: int, seed: int = 0): + """Group synapses & build `CoreBlock`.""" # FIXME where does the parameter check do? if seed > (1 << 64) - 1: warnings.warn( - f"Random seed {seed} is too large, truncated into 64 bits!", UserWarning + f"random seed {seed} is too large, truncated into 64 bits.", + TruncationWarning, ) - return cls(*synapses, weight_precision=wp0, seed=seed) + return cls(*synapses, routing_id=routing_id, seed=seed) @classmethod def export_core_plm_config(cls, cb: "CoreBlock") -> Dict[Coord, CoreConfig]: @@ -388,7 +395,6 @@ def __init__( parent: CoreBlock, routing_coord: Coord, n_neuron: int, - *, raw_weights: List[WeightType], neu_segs_of_cplm: NeuSegOfCorePlm, name: Optional[str] = None, @@ -416,18 +422,13 @@ def __init__( self.neu_configs: Dict[NeuDyn, NeuronConfig] = dict() @classmethod - def build(cls, parent: CoreBlock, idx: int, raw_weights: List[WeightType]): + def build(cls, parent: CoreBlock, idx: int): coord = parent.core_coords[idx] n_neuron = parent.n_neuron_of_plm[idx] neu_segs_of_cplm = parent.neuron_segs_of_cb[idx] + raw_weights = parent.get_raw_weight_of_coord(idx) - return cls( - parent, - coord, - n_neuron, - raw_weights=raw_weights, - neu_segs_of_cplm=neu_segs_of_cplm, - ) + return cls(parent, coord, n_neuron, raw_weights, neu_segs_of_cplm) def _fold_raw_weights(self, raw_weights: List[WeightType]) -> WeightType: """Fold the weights into LCN-sized blocks.""" @@ -472,7 +473,7 @@ def _weight_ram_mapping(self) -> WeightRamType: if self.n_weight_bits == 1: w_unpacked[:row, :col] = self._weights_folded else: - # [N*M] -> [M*N*1] + # (N, M) -> (M*N, 1) w_folded_3d = np.expand_dims(self._weights_folded.T, axis=2).astype( np.uint8 ) @@ -490,19 +491,20 @@ def _weight_ram_mapping(self) -> WeightRamType: :row, self.n_weight_bits * i : self.n_weight_bits * (i + 1) ] = unpacked - assert np.max(w_unpacked, axis=None) <= 1 - assert np.min(w_unpacked, axis=None) >= 0 + assert np.max(w_unpacked, axis=None) <= np.uint8(1) + assert np.min(w_unpacked, axis=None) >= np.uint8(0) # Convert the unpacked weights into a mapping format, # corresponding to the RAM address, each address contains 18 uint64. - # [512 * 1152] -> [(512*18) * 64](uint8). Reshape to 64 columns to avoid contiguous problem. + # (1152, 512) -> (512, 1152) -> (512*18, 64)(uint8). 
+ # Reshape to 64 columns to avoid contiguous problem. w_unpacked_T_rehaped = w_unpacked.T.reshape(-1, 64) - # [(512*18) * 64](uint8) -> [(512*18) * 8](uint8) + # (512*18, 64)(uint8) -> (512*18, 8)(uint8) w_packed_u8 = np.packbits( w_unpacked_T_rehaped, axis=1, bitorder=HwConfig.WEIGHT_BITORDER ) - # [(512*18) * 8](uint8) -> [(512*18) * 1](uint64) -> [512 * 18](uint64) + # (512*18, 8)(uint8) -> (512*18, 1)(uint64) -> (512, 18)(uint64) w_packed_u64 = w_packed_u8.view(np.uint64).reshape(-1, 18) w_packed_u64.setflags(write=False) @@ -715,21 +717,55 @@ def __len__(self) -> int: return self.n_core_required -def n_axon2lcn_ex(n_axon: int, fan_in_max: int) -> LCN_EX: - """Convert #N(of axons) to `LCN_EX`. +class EmptyCorePlacement(CoreAbstract): + """Empty core placement.""" - Description: - LCN_EX = log2[ceil(#N/fan-in per dendrite)], where LCN_EX = 0 is `LCN_1X`. - """ - if n_axon < 1: - raise ValueError(f"The #N of axons must be positive, but got {n_axon}") + _default_wp: ClassVar[WP] = WP.WEIGHT_WIDTH_1BIT + _default_lcn_ex: ClassVar[LCN_EX] = LCN_EX.LCN_1X + _default_n_dendrite: ClassVar[int] = 0 + _default_tws: ClassVar[int] = 0 + _default_twe: ClassVar[int] = 0 + _default_target_lcn: ClassVar[LCN_EX] = LCN_EX.LCN_1X - if (lcn_bit := ((n_axon - 1) // fan_in_max).bit_length()) > LCN_EX.LCN_64X: - raise ResourceError( - f"LCN extension required out of {LCN_EX.LCN_64X}: {lcn_bit}" + def __init__(self, coord: Coord, name: Optional[str] = None) -> None: + super().__init__(name) + self.coord = coord + + def export_param_config(self) -> CoreConfig: + _mode_params = CoreModeDict[CoreMode.MODE_SNN] + # fmt: off + cb_config = CoreConfig( + self.name, # name of the core + self._default_wp, # weight_precision + self._default_lcn_ex, # lcn_extension + _mode_params[0], # input_width_format + _mode_params[1], # spike_width_format + self._default_n_dendrite, # num_dendrite + MaxPoolingEnable.DISABLE, # max_pooling_en + self._default_tws, # tick_wait_start + self._default_twe, # tick_wait_end + _mode_params[2], # snn_mode_en + self._default_target_lcn, # target_lcn + _BACKEND_CONTEXT.test_chip_addr, # test_chip_addr ) + # fmt: on + return cb_config + + def export_core_plm_config(self) -> EmptyCorePlacementConfig: + core_param = self.export_param_config() + return EmptyCorePlacementConfig.encapsulate(core_param) + + @classmethod + def build(cls, coord: Coord): + return cls(coord) - return LCN_EX(lcn_bit) + @property + def n_core_required(self): + return 1 + + @property + def shape(self) -> Tuple[int, int]: + return (0, 0) def max_lcn_of_cb(cb: List[CoreBlock]) -> LCN_EX: diff --git a/paibox/backend/routing.py b/paibox/backend/routing.py index 78510189..795d00e5 100644 --- a/paibox/backend/routing.py +++ b/paibox/backend/routing.py @@ -9,7 +9,8 @@ from paibox.exceptions import NotSupportedError -from .placement import CoreBlock, CorePlacement +from .conf_template import CorePlacementInfo +from .placement import CoreBlock, CorePlacement, EmptyCorePlacement __all__ = ["RoutingGroup", "RoutingRoot"] @@ -84,7 +85,7 @@ def add_child( ) -> bool: if self.level == Level.L0: # L0-level cluster cannot add child. 
- raise AttributeError(f"L0-level cluster cannot add child") + raise AttributeError(f"L0-level cluster cannot add child.") if self.is_full(): return False @@ -101,7 +102,7 @@ def add_child_to( ) -> bool: """Add a child cluster to a certain `direction`.""" if self.level - child.level != 1: - raise ValueError + raise ValueError(f"Cannot skip more than 1 level.") if not force and d in self.children: return False @@ -127,7 +128,7 @@ def find_cluster_by_path( if len(path) > self.level: raise ValueError( - f"The length of the {path} should be less than or equal to level, but yours is greater than" + f"the length of the {path} should be less than or equal to level." ) if path[0] not in self.children: @@ -150,7 +151,9 @@ def get_routing_path(self, cluster: "RoutingCluster") -> Optional[List[Direction - A list of `Direction` from L4 to L0. """ if cluster.level > self.level: - raise ValueError + raise ValueError( + f"Cannot get routing path because the level cluster is higher." + ) if cluster.level == self.level: if cluster != self: @@ -188,7 +191,7 @@ def n_child_avail(self) -> int: def _find_lx_cluster_with_n_child_avail( self, lx: Level, n_child_avail: int, method: str = "nearest" ) -> Optional["RoutingCluster"]: - """Find the child of level `lx` with at least `n_child_avail` children available.""" + """Find the child of level `lx` with at least `n_child_avail` child available.""" if lx > self.level: raise ValueError @@ -218,8 +221,7 @@ def add_subtree( subtree: "RoutingCluster", method: str = "nearest", ) -> bool: - """Add the subtree's children to itself. \ - If successful, return the added parent cluster.""" + """Add the subtree's children to itself. If successful, return the added parent cluster.""" if subtree.level > self.level: raise ValueError @@ -268,7 +270,7 @@ def add_subtree( else: # Only support 1, 2, & 4. raise NotSupportedError( - f"#N of {sub_n_child} children not supported yet." + f"the number of {sub_n_child} child is not supported." ) return True @@ -307,10 +309,10 @@ def create_lx_full_tree( @classmethod def create_routing_tree(cls, lx: Level, n_branch: int) -> "RoutingCluster": - """Create a routing tree with `n_branch` children. + """Create a routing tree with `n_branch` child. - NOTE: When lx == L1, do not create the L0-level children. \ - WHen lx > L1, create the lx-1 level children. + NOTE: When lx == L1, do not create the L0-level child. \ + WHen lx > L1, create the lx-1 level child. 
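+
+        For example, `create_routing_tree(Level.L1, 4)` is expected to yield a bare L1
+        cluster (no L0-level children are pre-created), while a call at a higher level
+        creates `n_branch` children one level down.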
""" if lx == Level.L0 or n_branch < 0: raise ValueError @@ -339,10 +341,10 @@ def add_L0_for_placing(self, data: Any = None, **kwargs) -> "RoutingCluster": L1_cluster = self._find_lx_cluster_with_n_child_avail(Level.L1, 1) if not L1_cluster: - raise RuntimeError("Available L1 cluster not found!") + raise RuntimeError("available L1 cluster not found.") if not L1_cluster.add_child(cluster): - raise RuntimeError(f"Add child into L1 cluster failed!") + raise RuntimeError(f"add child to L1 cluster failed.") return cluster @@ -416,12 +418,13 @@ class RoutingGroup(List[CoreBlock]): """ def __init__(self, *cb: CoreBlock) -> None: - self.cb = list(cb) - + self.core_blocks = list(cb) self.assigned_coords: List[Coord] = [] - """Assigned core coordinates for the routing group.""" + """Assigned core coordinates in the routing group""" self.wasted_coords: List[Coord] = [] - """Wasted core coordinates for the routing group.""" + """Wasted core coordinates in routing group""" + self.wasted_core_plm: Dict[Coord, EmptyCorePlacement] = {} + """Wasted core placements""" def assign(self, assigned: List[Coord], wasted: List[Coord]) -> None: self.assigned_coords = assigned @@ -434,20 +437,40 @@ def assign(self, assigned: List[Coord], wasted: List[Coord]) -> None: cb.core_coords = assigned[cur_i : cur_i + n] cur_i += n + def core_block_alloc(self) -> None: + for cb in self: + cb.core_plm_alloc() + + # Allocate blank core placements for the wasted coordinates. + for coord in self.wasted_coords: + self.wasted_core_plm[coord] = EmptyCorePlacement.build(coord) + + def get_wasted_cplm_config(self) -> CorePlacementInfo: + return { + coord: core_plm.export_core_plm_config() + for coord, core_plm in self.wasted_core_plm.items() + } + + def get_n_core_occupied(self) -> int: + """Get the #N of cores occupied by the routing group.""" + return len(self.assigned_coords) + len(self.wasted_coords) + def __getitem__(self, idx: int) -> CoreBlock: - if idx >= len(self.cb) or idx < 0: - raise IndexError(f"Index out of range [0, {len(self.cb)}), {idx}.") + if idx >= len(self.core_blocks) or idx < 0: + raise IndexError( + f"index out of range [0, {len(self.core_blocks)}), ({idx})." + ) - return self.cb[idx] + return self.core_blocks[idx] def __len__(self) -> int: - return len(self.cb) + return len(self.core_blocks) def __iter__(self) -> Iterator[CoreBlock]: - return self.cb.__iter__() + return self.core_blocks.__iter__() def __contains__(self, key: CoreBlock) -> bool: - return key in self.cb + return key in self.core_blocks @property def n_core_required(self) -> int: @@ -474,7 +497,7 @@ def get_leaf_coord(self, cluster: "RoutingCluster") -> RoutingCoord: if path: return RoutingCoord(*path) - raise RuntimeError(f"Get leaf cluster {cluster.tag} coordinate failed.") + raise RuntimeError(f"get leaf cluster {cluster.tag} coordinate failed.") def insert_routing_group(self, routing_group: RoutingGroup) -> bool: """Insert a `RoutingGroup` in the routing tree. Assign each core blocks with \ diff --git a/paibox/backend/segment_utils.py b/paibox/backend/segment_utils.py index 6249c904..bea80f37 100644 --- a/paibox/backend/segment_utils.py +++ b/paibox/backend/segment_utils.py @@ -110,8 +110,8 @@ def _get_nsg_opt_core( if raise_warning: warnings.warn( - "When grouping neurons with 'core' optimization, unrolling " - "factor greater than 1 will be invalid. Modified to 1.", + "when grouping neurons with 'core' optimization, unrolling " + "factor greater than 1 is invalid. 
Modified to 1.", ParameterInvalidWarning, ) @@ -198,8 +198,8 @@ def _get_neu_slices( def _dense_reorganized( seg_slices_dict: Dict[NeuDyn, List[NeuSlice]], capacity: int, repl_prop: int ) -> NeuSegOfCoreBlock: - """Reorganize densely. Based on the result of 'latency' method, use greedy \ - algorithm to reorganize the incomplete neuron segments for saving cores. + """Reorganize densely. Based on the result of 'latency' method, use greedy algorithm to \ + reorganize the incomplete neuron segments for saving cores. """ def _find_neu_in_segs_of_cplm(neu: NeuDyn, seg_of_cplm: NeuSegOfCorePlm) -> bool: @@ -260,8 +260,8 @@ def get_neu_segments( """Get the neuron segments by given a optimization strategy. Args: - - optim_target: Target of optimization. 'catagory' strategy intends to optimize the throughput of nodes. \ - The 'dense' strategy intends to optimize the consumption of cores. + - optim_target: Target of optimization. 'catagory' strategy intends to optimize the throughput \ + of nodes. The 'dense' strategy intends to optimize the consumption of cores. """ if optim_target == "core": seg_slices_dict = _get_neu_slices_opt_core(neu_groups, capacity) @@ -303,7 +303,7 @@ def _seg_alloc(axon: SourceNodeType) -> AxonSegment: if offset + addr_width > fan_in_max: raise ResourceError( - f"Axons address out of range [0, {fan_in_max}), {offset + addr_width}." + f"axons address out of range [0, {fan_in_max}) ({offset + addr_width})." ) cur_offset = offset diff --git a/paibox/base.py b/paibox/base.py index 3af72fd2..e570a77b 100644 --- a/paibox/base.py +++ b/paibox/base.py @@ -1,15 +1,20 @@ import sys -from typing import Any, Dict, List, Literal, Optional, Set, Tuple +from typing import Any, ClassVar, Dict, List, Literal, Optional, Set, Tuple + +import numpy as np if sys.version_info >= (3, 10): from typing import TypeAlias else: from typing_extensions import TypeAlias +from paicorelib import WeightPrecision as WP + from .collector import Collector -from .generic import get_unique_name, is_name_unique from .mixin import ReceiveInputProj, StatusMemory, TimeRelatedNode +from .naming import get_unique_name, is_name_unique from .node import NodeDict, NodeList +from .types import WeightType __all__ = [] @@ -23,7 +28,12 @@ class PAIBoxObject: def __init__(self, name: Optional[str] = None) -> None: self._name: str = self.unique_name(name) - def __eq__(self, other) -> bool: + def __eq__(self, other: "PAIBoxObject") -> bool: + if not isinstance(other, PAIBoxObject): + raise TypeError( + f"cannot compare {type(self).__name__} with {type(other).__name__}." + ) + if self is other: return True @@ -59,44 +69,66 @@ def nodes( method: Literal["absolute", "relative"] = "absolute", level: int = -1, include_self: bool = True, + find_recursive: bool = False, ) -> Collector[str, "PAIBoxObject"]: - """Collect all the children nodes.""" - return self._find_nodes(method, level, include_self) + """Collect all child nodes. + + Args: + - method: the method to find the nodes. + - "absolute": the name of the node it is looking for will be `v.name`. + - "relative": the name will be its attribute name, `x` in `self.x = v`. + - level: the level at which the search ends. + - include_self: whether to include the current node itself. + - find_recursive: whether to search for nodes recursively until they are not found. 
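+
+        Example (illustrative): `net.nodes(level=1, include_self=False)` collects only the
+        direct children, while `net.nodes(find_recursive=True)` keeps descending into nested
+        sub-networks until no further nodes are found.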
+ """ + return self._find_nodes(method, level, include_self, find_recursive) def _find_nodes( self, method: Literal["absolute", "relative"] = "absolute", level: int = -1, include_self: bool = True, - lid: int = 0, + find_recursive: bool = False, + _lid: int = 0, _paths: Optional[Set[_IdPathType]] = None, + _iter_termination: bool = False, ) -> Collector[str, "PAIBoxObject"]: if _paths is None: _paths = set() gather = Collector() + if include_self: if method == "absolute": gather[self.name] = self else: gather[""] = self - if (level > -1) and (lid >= level): - return gather + if find_recursive: + if _iter_termination: + return gather + else: + if (level > -1) and (_lid >= level): + return gather + + iter_termi = True # iteration termination flag def _find_nodes_absolute() -> None: - nonlocal gather, nodes + nonlocal gather, nodes, iter_termi for v in self.__dict__.values(): if isinstance(v, PAIBoxObject): + iter_termi = False _add_node2(self, v, _paths, gather, nodes) elif isinstance(v, NodeList): for v2 in v: if isinstance(v2, PAIBoxObject): + iter_termi = False _add_node2(self, v2, _paths, gather, nodes) elif isinstance(v, NodeDict): for v2 in v.values(): if isinstance(v2, PAIBoxObject): + iter_termi = False _add_node2(self, v2, _paths, gather, nodes) # finding nodes recursively @@ -106,24 +138,29 @@ def _find_nodes_absolute() -> None: method=method, level=level, include_self=include_self, - lid=lid + 1, + find_recursive=find_recursive, + _lid=_lid + 1, _paths=_paths, + _iter_termination=iter_termi, ) ) def _find_nodes_relative() -> None: - nonlocal gather, nodes + nonlocal gather, nodes, iter_termi for k, v in self.__dict__.items(): if isinstance(v, PAIBoxObject): + iter_termi = False _add_node1(self, k, v, _paths, gather, nodes) elif isinstance(v, NodeList): for i, v2 in enumerate(v): if isinstance(v2, PAIBoxObject): + iter_termi = False _add_node1(self, f"{k}-{str(i)}", v2, _paths, gather, nodes) elif isinstance(v, NodeDict): for k2, v2 in v.items(): if isinstance(v2, PAIBoxObject): + iter_termi = False _add_node1(self, f"{k}.{k2}", v2, _paths, gather, nodes) # finding nodes recursively @@ -132,8 +169,10 @@ def _find_nodes_relative() -> None: method=method, level=level, include_self=include_self, - lid=lid + 1, + find_recursive=find_recursive, + _lid=_lid + 1, _paths=_paths, + _iter_termination=iter_termi, ).items(): if k2: gather[f"{k1}.{k2}"] = v2 @@ -149,7 +188,7 @@ def _find_nodes_relative() -> None: def _add_node1( - obj: object, + obj: Any, k: str, v: PAIBoxObject, _paths: Set[_IdPathType], @@ -165,7 +204,7 @@ def _add_node1( def _add_node2( - obj: object, + obj: Any, v: PAIBoxObject, _paths: Set[_IdPathType], gather: Collector[str, PAIBoxObject], @@ -195,10 +234,12 @@ def reset_state(self, *args, **kwargs): @property def shape_in(self) -> Tuple[int, ...]: + """Actual shape of input.""" raise NotImplementedError @property def shape_out(self) -> Tuple[int, ...]: + """Actual shape of output.""" raise NotImplementedError @property @@ -240,7 +281,7 @@ def export_params(self) -> Dict[str, Any]: if sys.version_info >= (3, 9): params.update({k.removeprefix("_"): v}) else: - params.update({k.lstrip("_"): v}) + params.update({k.lstrip("_"): v}) # compatible for py3.8 return params @@ -272,3 +313,32 @@ def unrolling_factor(self, factor: int) -> None: raise ValueError(f"'unrolling_factor' must be positive, but got {factor}.") self._unrolling_factor = factor + + +class SynSys(DynamicSys): + CFLAG_ENABLE_WP_OPTIMIZATION: ClassVar[bool] = True + """Compilation flag for weight precision 
optimization.""" + + @property + def weights(self) -> WeightType: + raise NotImplementedError + + @property + def weight_precision(self) -> WP: + raise NotImplementedError + + @property + def connectivity(self) -> WeightType: + raise NotImplementedError + + @property + def n_axon_each(self) -> np.ndarray: + return np.sum(self.connectivity, axis=0) + + @property + def num_axon(self) -> int: + return np.count_nonzero(np.any(self.connectivity, axis=1)) + + @property + def num_dendrite(self) -> int: + return np.count_nonzero(np.any(self.connectivity, axis=0)) diff --git a/paibox/collector.py b/paibox/collector.py index bf15c24e..fd70e4bb 100644 --- a/paibox/collector.py +++ b/paibox/collector.py @@ -10,7 +10,7 @@ def __setitem__(self, key: _KT, value: _VT) -> None: if key in self: if id(self[key]) != id(value): raise ValueError( - f"Name '{key}' conflicts: same name for {value} and {self[key]}." + f"mame '{key}' conflicts: same name for {value} & {self[key]}." ) super().__setitem__(key, value) @@ -30,7 +30,7 @@ def update( ) -> Union["Collector[_KT, _VT]", "Collector[_KT, _T]"]: if not isinstance(other, (dict, list, tuple)): raise TypeError( - f"Excepted a dict, list or sequence, but we got {other}, type {type(other)}" + f"expected a dict, list or sequence, but got {other}, type {type(other)}." ) if isinstance(other, dict): @@ -56,6 +56,7 @@ def __add__( Arguments: - other: the other dictionary. + Returns: - gather: the new collector. """ @@ -75,7 +76,7 @@ def __sub__( ) -> Union["Collector[_KT, _VT]", "Collector[str, _T]"]: if not isinstance(other, (dict, list, tuple)): raise TypeError( - f"Excepted a dict, list or sequence, but we got {other}, type {type(other)}" + f"expected a dict, list or sequence, but got {other}, type {type(other)}." ) gather = type(self)(self) @@ -83,11 +84,11 @@ def __sub__( if isinstance(other, dict): for k, v in other.items(): if k not in gather.keys(): - raise ValueError(f"Cannot find '{k}' in {self.keys()}.") + raise ValueError(f"cannot find '{k}' in {self.keys()}.") if id(v) != id(gather[k]): raise ValueError( - f"Cannot remove '{k}', since there's two different values:" + f"cannot remove '{k}', since there are two different values: " f"{v} != {gather[k]}" ) gather.pop(k) @@ -109,7 +110,7 @@ def __sub__( for k in set(keys_to_remove): if k not in gather: - raise KeyError(f"Key '{k}' not found. Removed failed.") + raise KeyError(f"key '{k}' not found. Removed failed.") gather.pop(k) diff --git a/paibox/context.py b/paibox/context.py index b5587baf..9884124d 100644 --- a/paibox/context.py +++ b/paibox/context.py @@ -19,7 +19,7 @@ def load(self, key: Any, default: Any = None) -> Any: return super().__getitem__(key) if default is None: - raise KeyError(f"The context of '{key}' not found.") + raise KeyError(f"the context of '{key}' not found.") return default @@ -27,7 +27,7 @@ def save(self, *args, **kwargs) -> None: """Save the context by the key-value pairs.""" if len(args) % 2 > 0: raise TypeError( - f"Expected even positional arguments but odd given ({len(args)})" + f"expected even positional arguments, but odd given ({len(args)})." 
) for i in range(0, len(args), 2): diff --git a/paibox/exceptions.py b/paibox/exceptions.py index bb593813..8cf17395 100644 --- a/paibox/exceptions.py +++ b/paibox/exceptions.py @@ -62,3 +62,9 @@ class TruncationWarning(PAIBoxWarning, UserWarning): """Value out of range & will be truncated.""" pass + + +class AutoOptimizationWarning(PAIBoxWarning): + """Parameters are optimized automatically by PAIBox.""" + + pass diff --git a/paibox/mixin.py b/paibox/mixin.py index 508e245e..7af5f923 100644 --- a/paibox/mixin.py +++ b/paibox/mixin.py @@ -9,7 +9,7 @@ from .context import _FRONTEND_CONTEXT from .exceptions import RegisterError -from .generic import get_unique_name +from .naming import get_unique_name from .node import NodeDict _T = TypeVar("_T") @@ -28,10 +28,7 @@ def wrapper(*args, **kwargs): def prevent(func): - """ - Decorate func with this to prevent raising an Exception when \ - an error is encountered. - """ + """Decorate function with this to prevent raising an Exception when an error is encountered.""" @wraps(func) def wrapper(*args, **kwargs): @@ -44,11 +41,9 @@ def wrapper(*args, **kwargs): def check(attr): + """Decorate function with this to check whether the object has an attribute with the given name.""" + def decorator(method): - """ - Decorate method with this to check whether the object \ - has an attribute with the given name. - """ @wraps(method) def wrapper(self, *args, **kwargs): @@ -75,7 +70,7 @@ def __getitem__(self, item: str) -> Any: if item in self.children: return self.children[item] - raise KeyError(f"Key '{item}' not found.") + raise KeyError(f"key '{item}' not found.") def _get_elem_name(self, elem: object) -> str: if isinstance(elem, pb.base.PAIBoxObject): @@ -99,7 +94,7 @@ def elem_format( for c in child: if not isinstance(c, child_type): raise ValueError( - f"Expect type {child_type.__name__}, but got {type(c)}." + f"expect type {child_type.__name__}, but got {type(c)}." ) elems[self._get_elem_name((c))] = c @@ -107,18 +102,18 @@ def elem_format( for k, v in child.items(): if not isinstance(v, child_type): raise ValueError( - f"Expect type {child_type.__name__}, but got {type(c)}." + f"expect type {child_type.__name__}, but got {type(c)}." ) elems[k] = v else: raise TypeError( - f"Expect elements in dict, list or tuple, but got {type(child)}." + f"expect elements in dict, list or tuple, but got {type(child)}." ) for k, v in children_as_dict.items(): if not isinstance(v, child_type): raise ValueError( - f"Expect type {child_type.__name__}, but got {type(v)}." + f"expect type {child_type.__name__}, but got {type(v)}." 
) elems[k] = v @@ -134,7 +129,7 @@ class ReceiveInputProj(MixIn): def register_master(self, key: str, master_target) -> None: if key in self.master_nodes: - raise RegisterError(f"Master node with key '{key}' already exists.") + raise RegisterError(f"master node with key '{key}' already exists.") self.master_nodes[key] = master_target @@ -142,7 +137,7 @@ def unregister_master(self, key: str, strict: bool = True) -> Optional[Any]: if key in self.master_nodes: return self.master_nodes.pop(key, None) elif strict: - raise KeyError(f"Key '{key}' not found in master nodes.") + raise KeyError(f"key '{key}' not found in master nodes.") def get_master_node(self, key: str) -> Optional[Any]: return self.master_nodes.get(key, None) @@ -189,17 +184,17 @@ def __init__(self) -> None: def set_memory(self, name: str, value: Any) -> None: if hasattr(self, name): - raise AttributeError(f"'{name}' has been set as a member variable!") + raise AttributeError(f"'{name}' has been set as a member variable.") self._memories[name] = value self.set_reset_value(name, value) - def reset(self, name: Optional[str] = None) -> None: + def reset_memory(self, name: Optional[str] = None) -> None: if isinstance(name, str): if name in self._memories: self._memories[name] = deepcopy(self._memories_rv[name]) else: - raise KeyError(f"Key '{name}' not found!") + raise KeyError(f"key '{name}' not found.") else: for k in self._memories.keys(): self._memories[k] = deepcopy(self._memories_rv[k]) @@ -209,11 +204,11 @@ def set_reset_value(self, name: str, init_value: Any) -> None: def __getattr__(self, name: str) -> Any: if "_memories" in self.__dict__: - _memories = self.__dict__.get("_memories") - if _memories is not None and name in _memories: + _memories = self.__dict__["_memories"] + if name in _memories: return _memories[name] - raise AttributeError(f"Attribute '{name}' not found!") + raise AttributeError(f"attribute '{name}' not found.") def __setattr__(self, name: str, value: Any) -> None: _memories = self.__dict__.get("_memories") diff --git a/paibox/generic.py b/paibox/naming.py similarity index 81% rename from paibox/generic.py rename to paibox/naming.py index 276983e0..de43c242 100644 --- a/paibox/generic.py +++ b/paibox/naming.py @@ -12,12 +12,12 @@ def is_name_unique(name: str, obj: object) -> None: Otherwise raise an error. """ if not name.isidentifier(): - raise ValueError(f"{name} is not a valid identifier") + raise ValueError(f"'{name}' is not a valid identifier.") if name in _id_dict: if _id_dict[name] != id(obj): raise RegisterError( - f"Name of {obj}({name}) is already used by {_id_dict[name]}" + f"name of {obj}({name}) is already used by {_id_dict[name]}." 
) else: @@ -41,4 +41,4 @@ def clear_name_cache(ignore_warn: bool = False) -> None: _type_names.clear() if not ignore_warn: - warnings.warn(f"All named models & ids are cleared.", PAIBoxWarning) + warnings.warn(f"all named models & ids are cleared.", PAIBoxWarning) diff --git a/paibox/network.py b/paibox/network.py index f3ae7bb5..98c1772d 100644 --- a/paibox/network.py +++ b/paibox/network.py @@ -8,16 +8,18 @@ from .collector import Collector from .exceptions import PAIBoxWarning, RegisterError from .mixin import Container +from .neuron import Neuron from .node import NodeDict from .projection import InputProj, Projection from .synapses import RIGISTER_MASTER_KEY_FORMAT, SynSys __all__ = ["DynSysGroup", "Network"] -ComponentsType: TypeAlias = Union[InputProj, NeuDyn, SynSys] +ComponentsType: TypeAlias = Union[InputProj, Neuron, SynSys] class DynSysGroup(DynamicSys, Container): + def __init__( self, *components_as_tuple, @@ -31,14 +33,12 @@ def __init__( ) def update(self, **kwargs) -> None: - """For a network, the operating nodes within it will be distributed according to the network level \ - where they are located. For I, S & N, if the network is a two-level nested network, it can be \ - divided into Ix, Sx, Nx and Iy, Sy, Ny, where x & y are two parts containing many operations. \ - - TODO Prove that the operation sequence I->S->N can be divided into Ix->Sx->Nx->Iy->Sy->Ny & it has \ - nothing to do with the network topology. - """ - nodes = self.nodes(level=1, include_self=False).subset(DynamicSys).unique() + """Find nodes of the network recursively.""" + nodes = ( + self.nodes(include_self=False, find_recursive=True) + .subset(DynamicSys) + .unique() + ) for node in nodes.subset(Projection).values(): node(**kwargs) @@ -49,13 +49,12 @@ def update(self, **kwargs) -> None: for node in nodes.subset(NeuDyn).values(): node() - for node in ( - nodes.not_subset(Projection).not_subset(SynSys).not_subset(NeuDyn).values() - ): - node() - def reset_state(self) -> None: - nodes = self.nodes(level=1, include_self=False).subset(DynamicSys).unique() + nodes = ( + self.nodes(include_self=False, find_recursive=True) + .subset(DynamicSys) + .unique() + ) for node in nodes.subset(Projection).values(): node.reset_state() @@ -66,20 +65,15 @@ def reset_state(self) -> None: for node in nodes.subset(NeuDyn).values(): node.reset_state() - for node in ( - nodes.not_subset(Projection).not_subset(SynSys).not_subset(NeuDyn).values() - ): - node.reset_state() - def __call__(self, **kwargs) -> None: return self.update(**kwargs) def add_components(self, *implicit: DynamicSys, **explicit: DynamicSys) -> None: - """Add new components. When a component is passed in explicitly, its tag name \ - can be specified. Otherwise `.name` will be used. + """Add new components. When a component is passed in explicitly, its tag name can \ + be specified. Otherwise `.name` will be used. - NOTE: After instantiated the components outside the `DynSysGroup`, you should \ - call `add_components()` to actually add the new components to itself. + NOTE: After instantiated the components outside the `DynSysGroup`, you should call \ + `add_components()` to actually add the new components to itself. 
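+
+        A usage sketch (names are illustrative): `net.add_components(n_extra, s_new=syn_extra)`
+        registers `n_extra` under its own `.name` and `syn_extra` under the tag `s_new`.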
""" for comp in implicit: setattr(self, comp.name, comp) @@ -87,124 +81,165 @@ def add_components(self, *implicit: DynamicSys, **explicit: DynamicSys) -> None: for tag, comp in explicit.items(): setattr(self, tag, comp) - def _remove_component(self, remove: DynamicSys) -> None: - """Remove a component in the network.""" - for tag, obj in self.__dict__.items(): - if obj is remove: - delattr(self, tag) - break - - return None - - def _disconnect_neudyn( - self, - neudyn_a: NeuDyn, - condition: Callable[[SynSys], bool], - neudyn_b: Optional[NeuDyn] = None, - remove_syn: bool = True, - ) -> List[SynSys]: - nodes = self.nodes(level=1, include_self=False).subset(DynamicSys).unique() - - if neudyn_b is None: - self._assert_neudyn(nodes, neudyn_a) - else: - self._assert_neudyn(nodes, neudyn_a, neudyn_b) + def disconnect_syn( + self, target_syn: SynSys, exclude_source: bool = False + ) -> SynSys: + """Disconnect a synapse in the nwtwork. - target_syns = self._find_syn_to_unregi(nodes, condition) + Args: + - target_syn: target synapse. + - exclude_source: whether to disconnect the source. If so, remove the synapse \ + from the network - if target_syns: - for syn in target_syns: - self._disconnect_syn(syn) + Returns: the disconnected synapse. + """ + ret = target_syn.dest.unregister_master( + RIGISTER_MASTER_KEY_FORMAT.format(target_syn.name) + ) + if ret is not target_syn: + raise RegisterError("unregister failed.") - # FIXME The disconnected synapses will not effect the simulation. - # However, it will effect the placement in the backend. - if remove_syn: - self._remove_component(syn) + if not exclude_source: + self._remove_component(target_syn) - return target_syns - else: - warnings.warn("There is no synapse to unregister.", PAIBoxWarning) - return [] + return target_syn - def disconnect_neudyn_from( - self, neudyn_a: NeuDyn, neudyn_b: NeuDyn, remove: bool = True + def disconnect_neuron_from( + self, neuron_a: Neuron, neuron_b: Neuron ) -> List[SynSys]: - """Disconnect synapses between `NeuDyn` A & B. + """Disconnect synapses between `Neuron` A & B and remove the synapses from the network. Args: - - neudyn_a: target `NeuDyn` A. - - neudyn_b: target `NeuDyn` B. - - remove: whether to remove the original synapses from the network. + - neuron_a: target neuron A. + - neuron_b: target neuron B. - Returns: the disconnected synapses. + Returns: the disconnected synapses in list. """ - return self._disconnect_neudyn( - neudyn_a, - lambda syn: syn.source is neudyn_a and syn.dest is neudyn_b, - neudyn_b, - remove, + return self._disconn_neuron( + neuron_a, + lambda syn: syn.source is neuron_a and syn.dest is neuron_b, + neuron_b, + remove_syn=True, ) - def diconnect_neudyn_succ( - self, neudyn: NeuDyn, remove: bool = True - ) -> List[SynSys]: - """Disconnect successor synapses of `neudyn`. + # Not sure about specific needs + # def diconnect_neuron_succ(self, neuron: Neuron) -> List[SynSys]: + # """Disconnect successor synapses of `neuron`. - Args: - - neudyn: target `NeuDyn`. - - remove: whether to remove the original synapses from the network. + # Args: + # - neuron: target neuron. + # - remove: whether to remove the original synapses from the network. + # - new_source: only valid when `remove` is false. - Returns: the disconnected synapses. - """ - return self._disconnect_neudyn( - neudyn, lambda syn: syn.source is neudyn, remove_syn=remove - ) + # Returns: the disconnected synapses. 
+ # """ + # return self._disconn_neuron( + # neuron, lambda syn: syn.source is neuron, remove_syn=True + # ) - def diconnect_neudyn_pred( - self, neudyn: NeuDyn, remove: bool = True - ) -> List[SynSys]: - """Disconnect predecessor synapses of `neudyn`. + # def replace_neuron_succ(self, neuron: Neuron, new_source: Neuron) -> List[SynSys]: + # """Replace the source of successor synapses of `neuron` with new one.""" + # disconn_syns = self._disconn_neuron( + # neuron, lambda syn: syn.source is neuron, remove_syn=False + # ) - Args: - - neudyn: target `NeuDyn`. - - remove: whether to remove the original synapses from the network. + # for syn in disconn_syns: + # syn.source = new_source - Returns: the disconnected synapses. - """ - return self._disconnect_neudyn( - neudyn, lambda syn: syn.dest is neudyn, remove_syn=remove - ) + # return disconn_syns + + # def replace_neuron_pred(self, neuron: Neuron, new_source: Neuron) -> List[SynSys]: + # """Replace the destination of predecessor synapses of `neuron` with new one. + + # Args: + # - neuron: target neuron. + # - remove: whether to remove the original synapses from the network. + + # Returns: the disconnected synapses. + # """ + # disconn_syns = self._disconn_neuron( + # neuron, lambda syn: syn.dest is neuron, remove_syn=False + # ) - def insert_neudyn( + # for syn in disconn_syns: + # syn.dest = new_source + + # return disconn_syns + + def insert_between_neuron( self, - neudyn_a: NeuDyn, - neudyn_b: NeuDyn, - components_to_insert: Tuple[ComponentsType, ...], + neuron_a: Neuron, + neuron_b: Neuron, + cpn_to_insert: Tuple[ComponentsType, ...], replace: bool = True, - remove: bool = True, ) -> List[SynSys]: - """Insert new components between `NeuDyn` A & B. + """Insert new components between `Neuron` A & B. Args: - - neudyn_a: target `NeuDyn` A. - - neudyn_b: target `NeuDyn` B. - - components_to_insert: new components to insert between `neudyn_a` & `neudyn_b`. + - neuron_a: target neuron A. + - neuron_b: target neuron B. + - cpn_to_insert: components to insert between `neuron_a` & `neuron_b`. - replace: whether to disconnect the original synapses. Default is `True`. - - remove: whether to remove the original synapses from the network. Valid only when `replace` is `True`. - Returns: the disconnected synapses. + Returns: the disconnected synapses in list. """ if replace: - removed_syn = self.disconnect_neudyn_from(neudyn_a, neudyn_b, remove=remove) + removed_syn = self.disconnect_neuron_from(neuron_a, neuron_b) else: removed_syn = [] - self.add_components(*components_to_insert) + self.add_components(*cpn_to_insert) return removed_syn + def _remove_component(self, remove: DynamicSys) -> None: + """Remove a component in the network.""" + for tag, obj in self.__dict__.items(): + if obj is remove: + delattr(self, tag) + break + + return None + + def _disconn_neuron( + self, + neuron_a: Neuron, + condition: Callable[[SynSys], bool], + neuron_b: Optional[Neuron] = None, + remove_syn: bool = True, + ) -> List[SynSys]: + nodes = ( + self.nodes(include_self=False, find_recursive=True) + .subset(DynamicSys) + .unique() + ) + + if neuron_b is None: + self._assert_neuron(nodes, neuron_a) + else: + self._assert_neuron(nodes, neuron_a, neuron_b) + + target_syns = self._find_syn_to_disconn(nodes, condition) + + if target_syns: + for syn in target_syns: + self._disconn_syn(syn) + + # The disconnected synapses will not effect the simulation, but will + # effect the placement in the backend. 
+ # If the disconnected synapses aren't removed from the network, do cleaning + # before the compilation in the backend. + # TODO Add a pre-processing step before the compilation. + if remove_syn: + self._remove_component(syn) + + return target_syns + else: + warnings.warn("there is no synapses to disconnect.", PAIBoxWarning) + return [] + @staticmethod - def _find_syn_to_unregi( + def _find_syn_to_disconn( nodes: Collector, condition: Callable[[SynSys], bool] ) -> List[SynSys]: syns = [] @@ -216,19 +251,27 @@ def _find_syn_to_unregi( return syns @staticmethod - def _disconnect_syn(target_syn: SynSys) -> None: + def _disconn_syn(target_syn: SynSys) -> None: + ret = target_syn.dest.unregister_master( + RIGISTER_MASTER_KEY_FORMAT.format(target_syn.name) + ) + if ret is not target_syn: + raise RegisterError("unregister failed.") + + @staticmethod + def _disconn_succ_syn(target_syn: SynSys) -> None: ret = target_syn.dest.unregister_master( RIGISTER_MASTER_KEY_FORMAT.format(target_syn.name) ) if ret is not target_syn: - raise RegisterError("Unregister failed!") + raise RegisterError("unregister failed.") @staticmethod - def _assert_neudyn(nodes: Collector, *neudyns: NeuDyn) -> None: - neu_dyns = nodes.subset(NeuDyn) + def _assert_neuron(nodes: Collector, *neurons: Neuron) -> None: + neu_dyns = nodes.subset(Neuron) - if any(neudyn not in neu_dyns.values() for neudyn in neudyns): - raise ValueError("Not all NeuDyn found in the network.") + if any(neuron not in neu_dyns.values() for neuron in neurons): + raise ValueError("not all neurons found in the network.") Network: TypeAlias = DynSysGroup @@ -259,11 +302,11 @@ def __getitem__(self, item: Union[str, int, slice]): if item in self.children: return self.children[item] else: - raise KeyError(f"Key {item} not found.") + raise KeyError(f"key '{item}' not found.") if isinstance(item, int): if item > len(self): - raise IndexError(f"Index out of range: {item}") + raise IndexError(f"index out of range {item}.") return tuple(self.children.values())[item] @@ -271,7 +314,7 @@ def __getitem__(self, item: Union[str, int, slice]): return Sequential(**dict(tuple(self.children.items())[item])) raise TypeError( - f"Expected type str, int or slice, but got {item}, type {type(item)}" + f"expected type str, int or slice, but got {item}, type {type(item)}." 
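A minimal usage sketch of the renamed graph-editing API above (`disconnect_neuron_from`, `insert_between_neuron`, `add_components`); since node discovery is now recursive (`find_recursive=True`), the same calls work on nested networks. It assumes the usual top-level re-exports (`pb.IF`, `pb.FullConn`, `pb.SynConnType`, `pb.DynSysGroup`); otherwise import `DynSysGroup` from `paibox.network`.

```python
import paibox as pb

n1 = pb.IF(4, threshold=10)
n2 = pb.IF(4, threshold=10)
s1 = pb.FullConn(n1, n2, conn_type=pb.SynConnType.One2One)

# Components can be passed at construction or added later with add_components().
net = pb.DynSysGroup(n1, s1, n2)

# Replace the direct n1 -> n2 connection with a relay neuron.
n_mid = pb.IF(4, threshold=1)
s_pre = pb.FullConn(n1, n_mid, conn_type=pb.SynConnType.One2One)
s_post = pb.FullConn(n_mid, n2, conn_type=pb.SynConnType.One2One)

# With replace=True (default), the old synapse s1 is unregistered and removed
# from the network before the new components are added.
removed = net.insert_between_neuron(n1, n2, (n_mid, s_pre, s_post))
```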
) def __len__(self) -> int: diff --git a/paibox/neuron/__init__.py b/paibox/neuron/__init__.py index 3b2a4223..9fe79ea0 100644 --- a/paibox/neuron/__init__.py +++ b/paibox/neuron/__init__.py @@ -1,12 +1,8 @@ -from .base import Neuron as Neuron +from .base import Neuron from .neurons import IF as IF from .neurons import LIF as LIF +from .neurons import Always1Neuron as Always1Neuron from .neurons import PhasicSpiking as PhasicSpiking from .neurons import TonicSpiking as TonicSpiking -__all__ = [ - "IF", - "LIF", - "TonicSpiking", - "PhasicSpiking", -] +__all__ = ["IF", "LIF", "TonicSpiking", "PhasicSpiking", "Always1Neuron"] diff --git a/paibox/neuron/base.py b/paibox/neuron/base.py index f3cc51cc..f2346fc9 100644 --- a/paibox/neuron/base.py +++ b/paibox/neuron/base.py @@ -14,18 +14,14 @@ MaxPoolingEnable, SpikeWidthFormat, ) -from typing_extensions import TypeAlias from paibox.base import NeuDyn -from paibox.types import Shape, SpikeType +from paibox.types import Shape, SpikeType, VoltageType from paibox.utils import as_shape, shape2num -__all__ = ["Neuron"] +from .utils import _vjt_overflow -VoltageType: TypeAlias = NDArray[np.int32] -VJT_MAX_LIMIT: int = 2**29 - 1 -VJT_MIN_LIMIT: int = -(2**29) -VJT_LIMIT: int = 2**30 +__all__ = ["Neuron"] class MetaNeuron: @@ -36,13 +32,13 @@ def __init__( shape: Shape, reset_mode: RM, reset_v: int, - leaking_comparison: LCM, + leak_comparison: LCM, threshold_mask_bits: int, neg_thres_mode: NTM, neg_threshold: int, pos_threshold: int, - leaking_direction: LDM, - leaking_integration_mode: LIM, + leak_direction: LDM, + leak_integration_mode: LIM, leak_v: int, synaptic_integration_mode: SIM, bit_truncation: int, @@ -58,17 +54,16 @@ def __init__( # They will be exported to the parameter verification model. self.reset_mode: RM = reset_mode self.reset_v: int = reset_v # Signed 30-bit - self.leaking_comparison: LCM = leaking_comparison + self.leak_comparison: LCM = leak_comparison self.threshold_mask_bits: int = threshold_mask_bits self.neg_thres_mode: NTM = neg_thres_mode self.neg_threshold: int = neg_threshold # Unsigned 29-bit self.pos_threshold: int = pos_threshold # Unsigned 29-bit - self.leaking_direction: LDM = leaking_direction - self.leaking_integration_mode: LIM = leaking_integration_mode + self.leak_direction: LDM = leak_direction + self.leak_integration_mode: LIM = leak_integration_mode self.leak_v: int = leak_v # Signed 30-bit self.synaptic_integration_mode: SIM = synaptic_integration_mode self.bit_truncation: int = bit_truncation # Unsigned 5-bit - self._vjt_init = 0 # Signed 30-bit. Fixed. # TODO These two config below are parameters of CORE. self._spike_width_format: SpikeWidthFormat @@ -93,11 +88,10 @@ def _neuronal_charge( `vjt` = `vjt_pre` + `_rho_w_ij` * \sum^{N-1}_{i=0} * x_i(t) * w_{i,j} """ _rho_w_ij = 1 # Random synaptic integration enable, 0/1 - xt = self.init_param(0).astype(np.int32) if self.synaptic_integration_mode is SIM.MODE_STOCHASTIC: raise NotImplementedError( - f"Mode {SIM.MODE_STOCHASTIC.name} not implemented." + f"mode {SIM.MODE_STOCHASTIC.name} is not implemented." ) else: if incoming_v.ndim == 2: @@ -107,16 +101,16 @@ def _neuronal_charge( v_charged = np.add(vjt_pre, _v).astype(np.int32) - return v_charged + return _vjt_overflow(v_charged) # Handle with overflow here def _neuronal_leak(self, vjt: VoltageType) -> VoltageType: - r"""2. Leaking integration. + r"""2. Leak integration. - 2.1 Leaking direction, forward or reversal. - If leaking direction is `MODE_FORWARD`, the `_ld` is 1, else is \sgn{`vjt`}. 
+ 2.1 Leak direction, forward or reversal. + If leak direction is `MODE_FORWARD`, the `_ld` is 1, else is \sgn{`vjt`}. - 2.2 Random leaking. - If leaking integration is `MODE_DETERMINISTIC`, then + 2.2 Random leak. + If leak integration is `MODE_DETERMINISTIC`, then `vjt` = `vjt` + `_ld` * `leak_v` else (`MODE_STOCHASTIC`) if abs(`leak_v`) >= `_rho_j_lambda`, then @@ -126,18 +120,18 @@ def _neuronal_leak(self, vjt: VoltageType) -> VoltageType: `vjt` = `vjt` + \sgn{`leak_v`}* `_ld` * `_F` """ - _rho_j_lambda = 2 # Random leaking, unsigned 29-bit. + _rho_j_lambda = 2 # Random leak, unsigned 29-bit. - if self.leaking_direction is LDM.MODE_FORWARD: - _ld = np.ones(self.varshape, dtype=np.bool_) + if self.leak_direction is LDM.MODE_FORWARD: + _ld = np.ones((self._n_neuron,), dtype=np.bool_) else: _ld = np.sign(vjt) - if self.leaking_integration_mode is LIM.MODE_DETERMINISTIC: + if self.leak_integration_mode is LIM.MODE_DETERMINISTIC: v_leaked = np.add(vjt, _ld * self.leak_v).astype(np.int32) else: raise NotImplementedError( - f"Mode {LIM.MODE_STOCHASTIC.name} is not implemented." + f"mode {LIM.MODE_STOCHASTIC.name} is not implemented." ) # _F = 1 if abs(self.leak_v) >= _rho_j_lambda else 0 # sgn_leak_v = fn_sgn(self.leak_v, 0) @@ -213,7 +207,7 @@ def _neuronal_reset(self, vjt: VoltageType) -> VoltageType: def _when_exceed_pos() -> VoltageType: if self.reset_mode is RM.MODE_NORMAL: - return np.full(self.varshape, self.reset_v, dtype=np.int32) + return np.full((self._n_neuron,), self.reset_v, dtype=np.int32) elif self.reset_mode is RM.MODE_LINEAR: return np.subtract( @@ -225,7 +219,7 @@ def _when_exceed_pos() -> VoltageType: def _when_exceed_neg() -> VoltageType: if self.neg_thres_mode is NTM.MODE_RESET: if self.reset_mode is RM.MODE_NORMAL: - return np.full(self.varshape, -self.reset_v, dtype=np.int32) + return np.full((self._n_neuron,), -self.reset_v, dtype=np.int32) elif self.reset_mode is RM.MODE_LINEAR: return np.add( vjt, @@ -236,7 +230,7 @@ def _when_exceed_neg() -> VoltageType: return vjt else: - return np.full(self.varshape, -self.neg_threshold, dtype=np.int32) + return np.full((self._n_neuron,), -self.neg_threshold, dtype=np.int32) # USE "=="! v_reset = np.where( @@ -281,11 +275,11 @@ def _relu(self, vj: VoltageType) -> VoltageType: def _when_exceed_pos() -> VoltageType: if self._spike_width_format is SpikeWidthFormat.WIDTH_1BIT: - return np.ones(self.varshape, dtype=np.int32) + return np.ones((self._n_neuron,), dtype=np.int32) if self.bit_truncation >= 8: return np.full( - self.varshape, + (self._n_neuron,), ((vj >> self.bit_truncation) - 8) & ((1 << 8) - 1), dtype=np.int32, ) @@ -293,17 +287,17 @@ def _when_exceed_pos() -> VoltageType: _mask = (1 << self.bit_truncation) - 1 _truncated_vj = vj & _mask return np.full( - self.varshape, + (self._n_neuron,), _truncated_vj << (8 - self.bit_truncation), dtype=np.int32, ) else: - return np.zeros(self.varshape, dtype=np.int32) + return np.zeros((self._n_neuron,), dtype=np.int32) y = np.where( vj >= self.pos_threshold, _when_exceed_pos(), - np.zeros(self.varshape, dtype=np.int32), + np.zeros((self._n_neuron,), dtype=np.int32), ).astype(np.int32) return y @@ -329,7 +323,7 @@ def update( v_charged = self._neuronal_charge(incoming_v, vjt_pre) # 2. 
Leak & fire - if self.leaking_comparison is LCM.LEAK_BEFORE_COMP: + if self.leak_comparison is LCM.LEAK_BEFORE_COMP: v_leaked = self._neuronal_leak(v_charged) spike = self._neuronal_fire(v_leaked) else: @@ -359,8 +353,6 @@ def bias(self) -> int: class Neuron(MetaNeuron, NeuDyn): _excluded_vars = ( - "_vjt_init", - "vjt_pre", "vjt", "vj", "y", @@ -375,18 +367,18 @@ class Neuron(MetaNeuron, NeuDyn): def __init__( self, shape: Shape, - reset_mode: RM, - reset_v: int, - leaking_comparison: LCM, - threshold_mask_bits: int, - neg_thres_mode: NTM, - neg_threshold: int, - pos_threshold: int, - leaking_direction: LDM, - leaking_integration_mode: LIM, - leak_v: int, - synaptic_integration_mode: SIM, - bit_truncation: int, + reset_mode: RM = RM.MODE_NORMAL, + reset_v: int = 0, + leak_comparison: LCM = LCM.LEAK_AFTER_COMP, + threshold_mask_bits: int = 0, + neg_thres_mode: NTM = NTM.MODE_RESET, + neg_threshold: int = -1, + pos_threshold: int = 1, + leak_direction: LDM = LDM.MODE_FORWARD, + leak_integration_mode: LIM = LIM.MODE_DETERMINISTIC, + leak_v: int = 0, + synaptic_integration_mode: SIM = SIM.MODE_DETERMINISTIC, + bit_truncation: int = 0, *, delay: int = 1, tick_wait_start: int = 1, @@ -397,17 +389,17 @@ def __init__( ) -> None: if neg_threshold > 0: raise ValueError( - f"Negative threshold must be non-positive, but got {neg_threshold}." + f"negative threshold must be non-positive, but got {neg_threshold}." ) if pos_threshold < 0: raise ValueError( - f"Positive threshold must be non-negative, but got {pos_threshold}." + f"positive threshold must be non-negative, but got {pos_threshold}." ) if bit_truncation < 0: raise ValueError( - f"Bit of tuncation must be non-negative, but got {bit_truncation}." + f"bit of tuncation must be non-negative, but got {bit_truncation}." ) if delay < 1: @@ -432,13 +424,13 @@ def __init__( shape, reset_mode, reset_v, - leaking_comparison, + leak_comparison, threshold_mask_bits, neg_thres_mode, (-neg_threshold), # In `MetaNeuron`, it is unsgined. pos_threshold, - leaking_direction, - leaking_integration_mode, + leak_direction, + leak_integration_mode, leak_v, synaptic_integration_mode, bit_truncation, @@ -447,12 +439,12 @@ def __init__( super(MetaNeuron, self).__init__(name) """Stateful attributes. Vector.""" - self.set_memory("_vjt", self.init_param(self._vjt_init).astype(np.int32)) - self.set_memory("vjt_pre", self.init_param(self._vjt_init).astype(np.int32)) + # Initial vjt is fixed at 0. + self.set_memory("_vjt", self.init_param(0).astype(np.int32)) self.set_memory("_inner_spike", self.init_param(0).astype(np.bool_)) # Not supported for attributes in ANN mode - self.set_memory("vj", self.init_param(self._vjt_init).astype(np.int32)) + self.set_memory("vj", self.init_param(0).astype(np.int32)) self.set_memory("y", self.init_param(0).astype(np.int32)) """Auxiliary internal stateful attributes for debugging""" @@ -498,20 +490,8 @@ def update( if x is None: x = self.sum_inputs() - # If the incoming membrane potential (30-bit signed) overflows, the chip will automatically handle it. - # This behavior needs to be implemented during simulation. 
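Since the base `Neuron.__init__` now carries defaults for every hardware parameter (see above), a neuron group can be built by overriding only what differs. A small sketch with arbitrary parameter values:

```python
from paibox.neuron import Neuron

# 10 neurons using the defaults above; only the positive threshold and the
# signed leak are overridden, everything else keeps its default value.
n = Neuron(10, pos_threshold=5, leak_v=-1, keep_shape=True)
```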
- incoming_v = np.where( - x > VJT_MAX_LIMIT, - x - VJT_LIMIT, - np.where( - x < VJT_MIN_LIMIT, - x + VJT_LIMIT, - x, - ), - ).astype(np.int32) - self._inner_spike, self._vjt, self._debug_thres_mode = super().update( - incoming_v, self._vjt + x, self._vjt ) idx = (self.timestamp + self.delay_relative - 1) % HwConfig.N_TIMESLOT_MAX @@ -520,8 +500,7 @@ def update( return self._inner_spike def reset_state(self, *args, **kwargs) -> None: - """Initialization, not the neuronal reset.""" - self.reset() # Call reset of `StatusMemory`. + self.reset_memory() # Call reset of `StatusMemory`. def __copy__(self) -> "Neuron": """Same as `__deepcopy__`.""" @@ -539,13 +518,13 @@ def __deepcopy__(self) -> "Neuron": self._shape, self.reset_mode, self.reset_v, - self.leaking_comparison, + self.leak_comparison, self.threshold_mask_bits, self.neg_thres_mode, self.neg_threshold, self.pos_threshold, - self.leaking_direction, - self.leaking_integration_mode, + self.leak_direction, + self.leak_integration_mode, self.leak_v, self.synaptic_integration_mode, self.bit_truncation, @@ -562,11 +541,11 @@ def copy(self) -> "Neuron": @property def shape_in(self) -> Tuple[int, ...]: - return self.varshape + return self._shape @property def shape_out(self) -> Tuple[int, ...]: - return self.varshape + return self._shape @property def num_in(self) -> int: diff --git a/paibox/neuron/neurons.py b/paibox/neuron/neurons.py index 6340fefd..96eaad0e 100644 --- a/paibox/neuron/neurons.py +++ b/paibox/neuron/neurons.py @@ -1,6 +1,6 @@ from typing import Optional -from paicorelib import LCM, LDM, LIM, NTM, RM, SIM +from paicorelib import LCM, LDM, NTM, RM from paibox.types import Shape @@ -16,51 +16,36 @@ def __init__( threshold: int, reset_v: int = 0, *, - delay: int = 1, - tick_wait_start: int = 1, - tick_wait_end: int = 0, - unrolling_factor: int = 1, keep_shape: bool = False, name: Optional[str] = None, + **kwargs, ) -> None: """ Arguments: - - shape : the shape of the neuron(s). It can be an integer, tuple or list. - - Threshold: When the membrane potential exceeds the threshold, neurons will fire - - reset_v : Membrane potential after firing - - Description: - IF neuron : intergration + firing + - shape: shape of neurons. + - threshold: when the membrane potential exceeds the threshold, neurons will fire. + - reset_v: reset membrane potential after firing + - delay: delay between neurons. Default is 1. + - tick_wait_start: set the neuron group to start at the `N`-th timestep. 0 means not to \ + start. Default is 1. + - tick_wait_end: set the neuron group to continue working for `M` timesteps, 0 means working\ + forever. Default is 0. + - unrolling_factor: the argument is related to the backend. It means that neurons will be \ + unrolled & deployed to more physical cores to reduce latency and increase throughput. \ + Default is 1. + - keep_shape: whether to maintain size information when recording data in the simulation. \ + Default is `False`. + - name: name of the object. 
""" - _sim = SIM.MODE_DETERMINISTIC - _lim = LIM.MODE_DETERMINISTIC - _ld = LDM.MODE_FORWARD - _lc = LCM.LEAK_AFTER_COMP - _pos_thres = threshold - _reset_v = reset_v - _ntm = NTM.MODE_SATURATION - _reset_mode = RM.MODE_NORMAL - super().__init__( shape, - _reset_mode, - _reset_v, - _lc, - 0, - _ntm, - 0, - _pos_thres, - _ld, - _lim, - 0, - _sim, - 0, - delay=delay, - tick_wait_start=tick_wait_start, - tick_wait_end=tick_wait_end, - unrolling_factor=unrolling_factor, + reset_v=reset_v, + neg_thres_mode=NTM.MODE_SATURATION, + neg_threshold=0, + pos_threshold=threshold, keep_shape=keep_shape, name=name, + **kwargs, ) @@ -72,57 +57,32 @@ def __init__( shape: Shape, threshold: int, reset_v: int = 0, - leaky_v: int = 0, + leak_v: int = 0, *, - delay: int = 1, - tick_wait_start: int = 1, - tick_wait_end: int = 0, - unrolling_factor: int = 1, keep_shape: bool = False, name: Optional[str] = None, + **kwargs, ) -> None: """ Arguments: - - shape: the shape of the neuron(s). It can be an integer, tuple or list. - - threshold: When the membrane potential exceeds the threshold, neurons will fire - - reset_v: Membrane potential after firing - - leaky_v: The leakage value will be directly added to the membrane potential. - If it is positive, the membrane potential will increase. - If is is negative, the membrane potential will decrease. - - Description: - LIF: leaky + intergration + firing + - shape: shape of neurons. + - threshold: when the membrane potential exceeds the threshold, neurons will fire. + - reset_v: reset membrane potential after firing + - leak_v: the signed leak voltage will be added directly to the membrane potential. + - If it is positive, the membrane potential will increase. + - If is is negative, the membrane potential will decrease. """ - _sim = SIM.MODE_DETERMINISTIC - _lim = LIM.MODE_DETERMINISTIC - _ld = LDM.MODE_FORWARD - _lc = LCM.LEAK_AFTER_COMP - _leak_v = leaky_v - _pos_thres = threshold - _reset_v = reset_v - _ntm = NTM.MODE_SATURATION - _reset_mode = RM.MODE_NORMAL - super().__init__( shape, - _reset_mode, - _reset_v, - _lc, - 0, - _ntm, - 0, - _pos_thres, - _ld, - _lim, - _leak_v, - _sim, - 0, - delay=delay, - tick_wait_start=tick_wait_start, - tick_wait_end=tick_wait_end, - unrolling_factor=unrolling_factor, + reset_mode=RM.MODE_NORMAL, + reset_v=reset_v, + neg_thres_mode=NTM.MODE_SATURATION, + neg_threshold=0, + pos_threshold=threshold, + leak_v=leak_v, keep_shape=keep_shape, name=name, + **kwargs, ) @@ -134,42 +94,25 @@ def __init__( shape: Shape, fire_step: int, *, - delay: int = 1, - tick_wait_start: int = 1, - tick_wait_end: int = 0, - unrolling_factor: int = 1, keep_shape: bool = False, name: Optional[str] = None, + **kwargs, ) -> None: """ Arguments: - - shape: the shape of the neuron(s). It can be an integer, tuple or list. + - shape: shape of neurons. - fire_step: every `N` spike, the neuron will fire positively. - Description: - The neuron receives `N` spikes and fires, then resets to 0. - `N` stands for firing steps. + NOTE: The neuron receives `N` spikes and fires, then it will reset to 0. 
""" super().__init__( shape, - RM.MODE_NORMAL, - 0, - LCM.LEAK_AFTER_COMP, - 0, - NTM.MODE_SATURATION, - 0, - fire_step, - LDM.MODE_FORWARD, - LIM.MODE_DETERMINISTIC, - 0, - SIM.MODE_DETERMINISTIC, - 0, - delay=delay, - tick_wait_start=tick_wait_start, - tick_wait_end=tick_wait_end, - unrolling_factor=unrolling_factor, + neg_thres_mode=NTM.MODE_SATURATION, + neg_threshold=0, + pos_threshold=fire_step, keep_shape=keep_shape, name=name, + **kwargs, ) @@ -182,48 +125,60 @@ def __init__( time_to_fire: int, neg_floor: int = -10, *, - delay: int = 1, - tick_wait_start: int = 1, - tick_wait_end: int = 0, - unrolling_factor: int = 1, keep_shape: bool = False, name: Optional[str] = None, + **kwargs, ) -> None: """ Arguments: - - shape: the shape of the neuron(s). It can be an integer, tuple or list. - - time_to_fire: after `time_to_fire` spikes, the neuron will fire positively. - - neg_floor: the negative floor that the neuron stays once firing. Default is -10. + - shape: shape of neurons. + - time_to_fire: after `N` spikes, the neuron will fire positively. + - neg_floor: once fired, the neurons will remain at this negative membrane potential. \ + Default is -10. - Description: - The neuron receives `N` spikes and fires, then resets the membrane potential to 0, - and never fires again. - - `N` stands for `time_to_fire`. + NOTE: Once the neuron receives `N` spikes and fires, it will reset to the negative floor & \ + never fires again. `N` stands for `time_to_fire`. """ leak_v = 1 - pos_thres = (1 + leak_v) * time_to_fire - _neg_thres = neg_floor - reset_v = -1 - _neg_thres + super().__init__( + shape, + reset_v=(-1 - neg_floor), + leak_comparison=LCM.LEAK_BEFORE_COMP, + neg_thres_mode=NTM.MODE_SATURATION, + neg_threshold=neg_floor, + pos_threshold=(1 + leak_v) * time_to_fire, + leak_direction=LDM.MODE_REVERSAL, + leak_v=leak_v, + keep_shape=keep_shape, + name=name, + **kwargs, + ) + +class Always1Neuron(Neuron): + """This neuron will always output 1 as long as it starts working. + + FIXME There must be a forward synapse connected to it, otherwise \ + the backend will go wrong. + """ + + def __init__( + self, + shape: Shape, + *, + keep_shape: bool = False, + name: Optional[str] = None, + **kwargs, + ) -> None: super().__init__( shape, - RM.MODE_NORMAL, - reset_v, - LCM.LEAK_BEFORE_COMP, - 0, - NTM.MODE_SATURATION, - neg_floor, - pos_thres, - LDM.MODE_REVERSAL, - LIM.MODE_DETERMINISTIC, - leak_v, - SIM.MODE_DETERMINISTIC, - 0, - delay=delay, - tick_wait_start=tick_wait_start, - tick_wait_end=tick_wait_end, - unrolling_factor=unrolling_factor, + reset_v=1, + leak_comparison=LCM.LEAK_BEFORE_COMP, + neg_thres_mode=NTM.MODE_SATURATION, + neg_threshold=0, + pos_threshold=0, + leak_v=(1 << 29) - 1, keep_shape=keep_shape, name=name, + **kwargs, ) diff --git a/paibox/neuron/utils.py b/paibox/neuron/utils.py new file mode 100644 index 00000000..bffc545d --- /dev/null +++ b/paibox/neuron/utils.py @@ -0,0 +1,29 @@ +import numpy as np + +from paibox.types import VoltageType + +VJT_MAX_LIMIT: int = 2**29 - 1 +VJT_MIN_LIMIT: int = -(2**29) +VJT_LIMIT: int = 2**30 + + +def _is_vjt_overflow(vjt: VoltageType) -> bool: + return bool(np.any(vjt > VJT_MAX_LIMIT) or np.any(vjt < VJT_MIN_LIMIT)) + + +def _vjt_overflow(vjt: VoltageType) -> VoltageType: + """Handle the overflow of the membrane potential. + + NOTE: If the incoming membrane potential (30-bit signed) overflows, the chip\ + will automatically handle it. This behavior needs to be implemented in \ + simulation. 
+ """ + return np.where( + vjt > VJT_MAX_LIMIT, + vjt - VJT_LIMIT, + np.where( + vjt < VJT_MIN_LIMIT, + vjt + VJT_LIMIT, + vjt, + ), + ).astype(np.int32) diff --git a/paibox/node.py b/paibox/node.py index 2b6c3e9e..f8bed49e 100644 --- a/paibox/node.py +++ b/paibox/node.py @@ -33,4 +33,4 @@ def __getitem__(self, key: _KT) -> _VT: if key in self: return super().__getitem__(key) - raise KeyError(f"Key '{key}' not found.") + raise KeyError(f"key '{key}' not found.") diff --git a/paibox/projection.py b/paibox/projection.py index c23efe36..7d3dc45c 100644 --- a/paibox/projection.py +++ b/paibox/projection.py @@ -42,8 +42,8 @@ def __init__( """The input node of network. Arguments: - - input: the input value of the projection node. It can be numeric value \ - or callable function(function or `Encoder`). + - input: the input value of the projection node. It can be numeric value or callable\ + function(function or `Encoder`). - shape_out: the shape of the output. - keep_shape: wether to keep the shape when retieving the feature map. - name: the name of the node. Optional. @@ -60,7 +60,7 @@ def __init__( self._num_input = input self._func_input = _func_bypass - self._shape_out = as_shape(shape_out) + self._shape = as_shape(shape_out) self.keep_shape = keep_shape self.set_memory("_inner_spike", np.zeros((self.num_out,), dtype=np.bool_)) @@ -68,33 +68,32 @@ def __init__( def update(self, **kwargs) -> SpikeType: _spike = self._get_neumeric_input(**kwargs) - if isinstance(_spike, (int, np.integer)): + if isinstance(_spike, (int, np.bool_, np.integer)): self._inner_spike = np.full((self.num_out,), _spike, dtype=np.bool_) elif isinstance(_spike, np.ndarray): - try: - self._inner_spike = _spike.reshape((self.num_out,)).astype(np.bool_) - except ValueError: + if shape2num(_spike.shape) != self.num_out: raise ShapeError( - f"Cannot reshape input value from {_spike.shape} to ({self.num_out},)." + f"cannot reshape output value from {_spike.shape} to ({self.num_out},)." ) + self._inner_spike = _spike.ravel().astype(np.bool_) else: - # Should be never + # should never be reached raise TypeError( - f"Excepted type int, np.integer or np.ndarray, " + f"expected type int, np.bool_, np.integer or np.ndarray, " f"but got {_spike}, type {type(_spike)}." ) return self._inner_spike def reset_state(self) -> None: - self.reset() # Call reset of `StatusMemory`. + self.reset_memory() # Call reset of `StatusMemory`. def _get_neumeric_input(self, **kwargs): # If `_func_input` is `None` while `input` is numeric, use `input` as input to the projection. # Otherwise, use the output of `_func_input`. 
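The 30-bit wrap-around that was previously inlined in `Neuron.update()` is now the helper `_vjt_overflow` in `paibox/neuron/utils.py` above; a quick numeric check of the wrap behaviour:

```python
import numpy as np
from paibox.neuron.utils import VJT_MAX_LIMIT, VJT_MIN_LIMIT, _vjt_overflow

v = np.array([VJT_MAX_LIMIT, VJT_MAX_LIMIT + 1, VJT_MIN_LIMIT - 1], dtype=np.int64)
print(_vjt_overflow(v))
# [ 536870911 -536870912  536870911] -- values wrap around the signed 30-bit range
```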
if self._num_input is None: if self._func_input is None: - raise SimulationError(f"Both numeric & functional input are not set.") + raise SimulationError(f"both numeric & functional input are not set.") else: return _call_with_ctx(self._func_input, **kwargs) @@ -113,7 +112,7 @@ def num_in(self) -> int: @property def num_out(self) -> int: - return shape2num(self._shape_out) + return shape2num(self._shape) @property def shape_in(self) -> Tuple[int, ...]: @@ -121,7 +120,7 @@ def shape_in(self) -> Tuple[int, ...]: @property def shape_out(self) -> Tuple[int, ...]: - return self._shape_out + return self._shape @property def input(self): @@ -130,10 +129,10 @@ def input(self): @input.setter def input(self, value: DataType) -> None: """Set the input at the beginning of running the simulation.""" - if not isinstance(value, (int, np.integer, np.ndarray)): + if not isinstance(value, (int, np.bool_, np.integer, np.ndarray)): raise TypeError( - f"Excepted type int, np.integer or np.ndarray, " - f"but got {value}, type {type(value)}" + f"expected type int, np.bool_, np.integer or np.ndarray, " + f"but got {value}, type {type(value)}." ) self._num_input = value @@ -167,8 +166,6 @@ def _call_with_ctx(f: Callable[..., DataType], *args, **kwargs) -> DataType: try: ctx = _FRONTEND_CONTEXT.get_ctx() bound = inspect.signature(f).bind(*args, **ctx, **kwargs) - # warnings.warn(_input_deprecate_msg, UserWarning) return f(*bound.args, **bound.kwargs) - except TypeError: return f(*args, **kwargs) diff --git a/paibox/simulator/__init__.py b/paibox/simulator/__init__.py index 2c8834a9..04dc9b2b 100644 --- a/paibox/simulator/__init__.py +++ b/paibox/simulator/__init__.py @@ -1,6 +1,5 @@ +from .encoder import LatencyEncoder as LatencyEncoder from .encoder import PeriodicEncoder as PeriodicEncoder from .encoder import PoissonEncoder as PoissonEncoder from .probe import Probe as Probe from .simulator import Simulator as Simulator - -__all__ = ["PeriodicEncoder", "PoissonEncoder", "Probe", "Simulator"] diff --git a/paibox/simulator/encoder.py b/paibox/simulator/encoder.py index 3f5d37d5..9ee624cb 100644 --- a/paibox/simulator/encoder.py +++ b/paibox/simulator/encoder.py @@ -1,69 +1,52 @@ -from abc import abstractmethod -from typing import Optional, Tuple +import math +from typing import Literal, Optional import numpy as np -from paibox.base import DynamicSys -from paibox.types import Shape -from paibox.utils import as_shape, shape2num +from paibox.mixin import StatusMemory +from paibox.types import SpikeType -__all__ = ["PeriodicEncoder", "PoissonEncoder"] +__all__ = ["LatencyEncoder", "PeriodicEncoder", "PoissonEncoder"] MAXSEED = np.iinfo(np.uint32).max MAXINT = np.iinfo(np.int32).max -class Encoder(DynamicSys): - def __init__( - self, - shape_out: Shape = (0,), - *, - seed: Optional[int] = None, - name: Optional[str] = None, - ) -> None: - self._shape_out = as_shape(shape_out) - - super().__init__(name) +class Encoder: + def __init__(self, seed: Optional[int] = None) -> None: self.rng = self._get_rng(seed) def _get_rng(self, seed: Optional[int] = None) -> np.random.RandomState: _seed = np.random.randint(MAXINT) if seed is None else seed return np.random.RandomState(_seed) - @property - def num_out(self) -> int: - return shape2num(self._shape_out) - - @property - def shape_in(self) -> Tuple[int, ...]: - return (0,) - - @property - def shape_out(self) -> Tuple[int, ...]: - return self._shape_out + def __call__(self, x: np.ndarray, *args, **kwargs) -> SpikeType: + raise NotImplementedError class StatelessEncoder(Encoder): 
pass -class StatefulEncoder(Encoder): - def __init__(self, T: int, shape_out: Shape, **kwargs) -> None: - super().__init__(shape_out, **kwargs) +class StatefulEncoder(Encoder, StatusMemory): + def __init__(self, T: int, seed: Optional[int] = None) -> None: + super().__init__(seed) + super(Encoder, self).__init__() if T < 1: - raise ValueError(f"T must be positive, but got {T}") + raise ValueError(f"'T' must be positive, but got {T}.") self.T = T self.set_memory("spike", None) self.set_memory("t", 0) - def __call__(self, x: Optional[np.ndarray] = None, *args, **kwargs) -> np.ndarray: + def __call__(self, x: Optional[np.ndarray] = None, *args, **kwargs) -> SpikeType: + # If there is no encoded spike but there is an input, encode the input if self.spike is None: if x is None: - raise ValueError("Input must be given if spike is None") + raise ValueError("input must be given if 'spike' is None.") - self.single_step_encode(x) + self.encode(x) t = self.t self.t += 1 @@ -73,8 +56,8 @@ def __call__(self, x: Optional[np.ndarray] = None, *args, **kwargs) -> np.ndarra return self.spike[t] - @abstractmethod - def single_step_encode(self, x: np.ndarray): + def encode(self, x: np.ndarray) -> None: + """Encoding function. Called only if there is no encoded spike.""" raise NotImplementedError @@ -83,24 +66,54 @@ def __init__(self, spike: np.ndarray, **kwargs) -> None: """Periodic encoder. Args: - - spike: the input spike. Encode when instantiate itself. \ - T = `.shape[0]` & shape_out = `.shape[1]`. + - spike: the spike to be encoded. Encode at instantiation, where `T=shape[0]` & `shape_out=shape[1]`. """ - super().__init__(spike.shape[0], spike.shape[1], **kwargs) + super().__init__(spike.shape[0], **kwargs) self.spike = spike + def encode(self, x: np.ndarray) -> None: + self.spike = x + self.T = x.shape[0] + + +class LatencyEncoder(StatefulEncoder): + def __init__(self, T: int, encoding_func: Literal["linear", "log"]) -> None: + """Latency encoder. + + Args: + - T: encoding timestep. + - encoding_func: encoding function. It can be 'log' or 'linear'. + + NOTE: See details at https://spikingjelly.readthedocs.io/zh-cn/latest/activation_based/2_encoding.html#id5 + """ + super().__init__(T) + + if encoding_func == "log": + self.alpha = math.exp(T - 1) - 1 + elif encoding_func != "linear": # `alpha` is not used in method 'linear'. + raise ValueError("encoding function must be 'linear' or 'log'.") + + self.enc_func = encoding_func + + def encode(self, x: np.ndarray) -> None: + if self.enc_func == "log": + t_f = (self.T - 1 - np.log(self.alpha * x + 1)).round().astype(np.int64) + else: + t_f = ((self.T - 1.0) * (1.0 - x)).round().astype(np.int64) + + indices = t_f.ravel() + spike = np.eye(self.T, dtype=np.bool_)[indices] + # [*, T] -> [T, *] + self.spike = np.moveaxis(spike, -1, 0) + class PoissonEncoder(StatelessEncoder): def __init__(self, seed: Optional[int] = None, **kwargs) -> None: """Poisson encoder. - Args: - - seed: the random seed. - - NOTE: The output size of the poisson encoder depends on the \ - actual input size. + NOTE: The output shape of the poisson encoder depends on the input shape. 
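The new `LatencyEncoder` is stateful: the first call encodes the input into a `(T, *shape)` spike train and later calls replay it one timestep at a time, while `PoissonEncoder` stays stateless. A minimal sketch with a made-up input:

```python
import numpy as np
from paibox.simulator import LatencyEncoder, PoissonEncoder

x = np.array([0.0, 0.5, 1.0])                    # normalized intensities in [0, 1]

le = LatencyEncoder(T=5, encoding_func="linear")
spikes = np.stack([le(x) for _ in range(5)])     # (5, 3); larger intensities fire earlier

pe = PoissonEncoder(seed=1)
poisson_spikes = pe(x)                           # bool array, same shape as x
```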
""" - super().__init__(seed=seed, **kwargs) + super().__init__(seed, **kwargs) - def __call__(self, x: np.ndarray, *args, **kwargs) -> np.ndarray: + def __call__(self, x: np.ndarray, *args, **kwargs) -> SpikeType: return np.less_equal(self.rng.random(x.shape), x).astype(np.bool_) diff --git a/paibox/simulator/probe.py b/paibox/simulator/probe.py index 8a1b9253..e2404e12 100644 --- a/paibox/simulator/probe.py +++ b/paibox/simulator/probe.py @@ -26,7 +26,7 @@ def __init__( def _check_attr(self, target: PAIBoxObject) -> None: if not hasattr(target, self.attr): raise AttributeError( - f"Attribute '{self.attr}' not found in target {self.target}." + f"attribute '{self.attr}' not found in target {self.target}." ) self.target = target diff --git a/paibox/simulator/simulator.py b/paibox/simulator/simulator.py index 65238eb9..a2228463 100644 --- a/paibox/simulator/simulator.py +++ b/paibox/simulator/simulator.py @@ -29,7 +29,7 @@ def __init__( """ if not isinstance(target, DynamicSys): raise SimulationError( - f"Target must be an instance of {DynamicSys.__name__}, but we got {target}: {type(target)}" + f"target must be an instance of {DynamicSys.__name__}, but got {target}, {type(target)}." ) super().__init__(name) @@ -60,18 +60,18 @@ def run(self, duration: int, reset: bool = False, **kwargs) -> None: """ if kwargs: warnings.warn( - "Passing extra arguments through 'run()' will be deprecated." + "passing extra arguments through 'run()' will be deprecated. " "Use 'FRONTEND_ENV.save()' instead.", DeprecationWarning, ) if duration < 1: - raise SimulationError(f"Duration must be positive, but got {duration}") + raise SimulationError(f"duration must be positive, but got {duration}.") n_steps = self._get_nstep(duration) if n_steps < 1: raise SimulationError( - f"Steps of simulation must be positive, but got {n_steps}" + f"the number of simulation steps must be positive, but got {n_steps}." 
) indices = np.arange(self._ts, self._ts + n_steps, dtype=np.uint16) @@ -105,7 +105,7 @@ def remove_probe(self, probe: Probe) -> None: self.probes.remove(probe) self._sim_data.pop(probe) else: - raise KeyError(f"Probe '{probe.name}' does not exist.") + raise KeyError(f"probe '{probe.name}' does not exist.") def _run_step(self, indices: NDArray[np.uint16], **kwargs) -> None: for i in range(indices.shape[0]): @@ -142,7 +142,7 @@ def get_raw_at_t(self, probe: Probe, t: int) -> Any: t_index = t if self._start_time_zero else t - 1 if not t_start <= t < self.timestamp: # [t_start, timestamp) - raise IndexError(f"Time {t} is out of range [{t_start}, {self.timestamp}).") + raise IndexError(f"time {t} is out of range [{t_start}, {self.timestamp}).") return self._sim_data[probe][t_index] diff --git a/paibox/synapses/__init__.py b/paibox/synapses/__init__.py index d7518cb4..19470873 100644 --- a/paibox/synapses/__init__.py +++ b/paibox/synapses/__init__.py @@ -1,6 +1,8 @@ -from .synapses import RIGISTER_MASTER_KEY_FORMAT +from .base import RIGISTER_MASTER_KEY_FORMAT, SynSys +from .synapses import Conv1d as Conv1d +from .synapses import Conv2d as Conv2d +from .synapses import FullConn as FullConn from .synapses import NoDecay as NoDecay -from .synapses import SynSys as SynSys -from .transforms import ConnType as ConnType +from .transforms import GeneralConnType as GeneralConnType -__all__ = ["NoDecay", "SynSys"] +__all__ = ["Conv1d", "Conv2d", "FullConn", "NoDecay", "GeneralConnType"] diff --git a/paibox/synapses/base.py b/paibox/synapses/base.py new file mode 100644 index 00000000..0928aa71 --- /dev/null +++ b/paibox/synapses/base.py @@ -0,0 +1,251 @@ +from typing import ClassVar, Optional, Tuple, Union + +import numpy as np +from paicorelib import HwConfig +from paicorelib import WeightPrecision as WP + +from paibox.base import NeuDyn, SynSys +from paibox.exceptions import ShapeError +from paibox.neuron import Neuron +from paibox.projection import InputProj +from paibox.types import DataArrayType, SynOutType, WeightType + +from .conv_utils import _fm_ndim1_check, _fm_ndim2_check, _KOrder3d, _KOrder4d +from .transforms import AllToAll, Conv1dForward, Conv2dForward +from .transforms import GeneralConnType as GConnType +from .transforms import Identity, MaskedLinear, OneToOne, Transform + +RIGISTER_MASTER_KEY_FORMAT = "{0}.output" + + +def _check_equal(num_in: int, num_out: int) -> int: + if num_in != num_out: + raise ShapeError( + f"the number of source & destination neurons must be equal: {num_in} != {num_out}." + ) + + return num_in + + +class Synapses: + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: NeuDyn, + ) -> None: + self._source = source + self._dest = dest + + @property + def source(self) -> Union[NeuDyn, InputProj]: + return self._source + + @property + def dest(self) -> NeuDyn: + return self._dest + + @property + def shape_in(self) -> Tuple[int, ...]: + return self._source.shape_out + + @property + def shape_out(self) -> Tuple[int, ...]: + return self._dest.shape_in + + @property + def num_in(self) -> int: + return self._source.num_out + + @property + def num_out(self) -> int: + return self._dest.num_in + + +class FullConnectedSyn(Synapses, SynSys): + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: NeuDyn, + name: Optional[str] = None, + ) -> None: + super(Synapses, self).__init__(name) + super().__init__(source, dest) + + self.set_memory("_synout", np.zeros((self.num_out,), dtype=np.int32)) + + # Register `self` for the destination `NeuDyn`. 
+ dest.register_master(RIGISTER_MASTER_KEY_FORMAT.format(self.name), self) + + def __call__(self, *args, **kwargs) -> SynOutType: + return self.update(*args, **kwargs) + + def update(self, spike: Optional[np.ndarray] = None, *args, **kwargs) -> SynOutType: + # Retrieve the spike at index `timestamp` of the dest neurons + if self.dest.is_working: + if isinstance(self.source, InputProj): + synin = self.source.output.copy() if spike is None else spike + else: + idx = self.dest.timestamp % HwConfig.N_TIMESLOT_MAX + synin = self.source.output[idx].copy() if spike is None else spike + else: + # Retrieve 0 to the dest neurons if it is not working + synin = np.zeros_like(self.source.spike, dtype=np.bool_) + + self._synout = self.comm(synin).ravel().astype(np.int32) + return self._synout + + def reset_state(self, *args, **kwargs) -> None: + # TODO Add other initialization methods in the future. + self.reset_memory() # Call reset of `StatusMemory`. + + def _set_comm(self, comm: Transform) -> None: + self.comm = comm + + @property + def output(self) -> SynOutType: + return self._synout + + @property + def weights(self) -> WeightType: + return self.comm.weights + + @property + def weight_precision(self) -> WP: + return self.comm._get_wp(self.CFLAG_ENABLE_WP_OPTIMIZATION) + + @property + def connectivity(self) -> WeightType: + """The connectivity matrix in `np.bool_` or `np.int8` format.""" + return self.comm.connectivity + + +class FullConnSyn(FullConnectedSyn): + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: NeuDyn, + weights: DataArrayType, + conn_type: GConnType, + name: Optional[str] = None, + ) -> None: + super().__init__(source, dest, name) + + if conn_type is GConnType.One2One: + comm = OneToOne(_check_equal(self.num_in, self.num_out), weights) + elif conn_type is GConnType.Identity: + if not isinstance(weights, (int, np.bool_, np.integer)): + raise TypeError( + f"expected type int, np.bool_, np.integer, but got type {type(weights)}." + ) + comm = Identity(_check_equal(self.num_in, self.num_out), weights) + elif conn_type is GConnType.All2All: + comm = AllToAll((self.num_in, self.num_out), weights) + else: # MatConn + if not isinstance(weights, np.ndarray): + raise TypeError( + f"expected type np.ndarray, but got type {type(weights)}." + ) + comm = MaskedLinear((self.num_in, self.num_out), weights) + + self._set_comm(comm) + + +class Conv1dSyn(FullConnectedSyn): + _spatial_ndim: ClassVar[int] = 1 + + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: Neuron, + kernel: np.ndarray, + stride: Tuple[int], + # padding: Tuple[int], + # fm_order: _Order2d, + order: _KOrder3d, + name: Optional[str] = None, + ) -> None: + super().__init__(source, dest, name) + + if kernel.ndim != self._spatial_ndim + 2: + raise ShapeError( + f"convolution kernel dimension must be {self._spatial_ndim + 2}, but got {kernel.ndim}." + ) + + if order == "IOL": + _kernel = np.swapaxes(kernel, 0, 1) + else: + _kernel = kernel.copy() + + # O,I,L + out_channels, in_channels, kernel_l = _kernel.shape + + # C,L + in_ch, in_l = _fm_ndim1_check(source.shape_out, "CL") + out_ch, out_l = _fm_ndim1_check(dest.shape_out, "CL") + + if in_ch != in_channels: + raise ShapeError(f"input channels mismatch: {in_ch} != {in_channels}.") + + if out_ch != out_channels: + raise ShapeError(f"output channels mismatch: {out_ch} != {out_channels}.") + + # If padding is considered, the implementation of convolution unrolling + # is extremely complex, so fix it. 
+ padding = (0,) + + assert (in_l + 2 * padding[0] - kernel_l) // stride[0] + 1 == out_l + + comm = Conv1dForward((in_l,), (out_l,), _kernel, stride, padding) + + self.comm = comm + + +class Conv2dSyn(FullConnectedSyn): + _spatial_ndim: ClassVar[int] = 2 + + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: Neuron, + kernel: np.ndarray, + stride: Tuple[int, int], + # padding: Tuple[int, int], + # fm_order: _Order3d, + order: _KOrder4d, + name: Optional[str] = None, + ) -> None: + super().__init__(source, dest, name) + + if kernel.ndim != self._spatial_ndim + 2: + raise ShapeError( + f"convolution kernel dimension must be {self._spatial_ndim + 2}, but got {kernel.ndim}." + ) + + if order == "IOHW": + _kernel = np.swapaxes(kernel, 0, 1) + else: + _kernel = kernel.copy() + + # O,I,H,W + out_channels, in_channels, kernel_h, kernel_w = _kernel.shape + + # C,H,W + in_ch, in_h, in_w = _fm_ndim2_check(source.shape_out, "CHW") + out_ch, out_h, out_w = _fm_ndim2_check(dest.shape_out, "CHW") + + if in_ch != in_channels: + raise ShapeError(f"input channels mismatch: {in_ch} != {in_channels}.") + + if out_ch != out_channels: + raise ShapeError(f"output channels mismatch: {out_ch} != {out_channels}.") + + # If padding is considered, the implementation of convolution unrolling + # is extremely complex, so fix it. + padding = (0, 0) + + assert (in_h + 2 * padding[0] - kernel_h) // stride[0] + 1 == out_h + assert (in_w + 2 * padding[1] - kernel_w) // stride[1] + 1 == out_w + + comm = Conv2dForward((in_h, in_w), (out_h, out_w), _kernel, stride, padding) + + self._set_comm(comm) diff --git a/paibox/synapses/conv_utils.py b/paibox/synapses/conv_utils.py new file mode 100644 index 00000000..3e658fda --- /dev/null +++ b/paibox/synapses/conv_utils.py @@ -0,0 +1,257 @@ +from functools import partial +from itertools import repeat +from typing import Any, Iterable, Literal, Tuple, TypeVar, Union + +import numpy as np +from numpy.typing import NDArray + +from paibox.exceptions import ShapeError +from paibox.types import SynOutType, WeightType + +T = TypeVar("T") + +_TupleAnyType = Union[T, Tuple[T, ...]] +_Tuple1Type = Union[T, Tuple[T]] +_Tuple2Type = Union[T, Tuple[T, T]] +_Tuple3Type = Union[T, Tuple[T, T, T]] + +_SizeAnyType = _TupleAnyType[int] +_Size1Type = _Tuple1Type[int] +_Size2Type = _Tuple2Type[int] +_Size3Type = _Tuple3Type[int] + +SizeAnyType = Tuple[int, ...] 
+Size1Type = Tuple[int] +Size2Type = Tuple[int, int] +Size3Type = Tuple[int, int, int] + +_Order2d = Literal["CL", "LC"] # Feature map order in 2d +_Order3d = Literal["CHW", "HWC"] # Feature map order in 3d +_KOrder3d = Literal["OIL", "IOL"] # Kernel order in 3d +_KOrder4d = Literal["OIHW", "IOHW"] # Kernel order in 4d + + +def _ntuple(x, n: int) -> Tuple[Any, ...]: + if isinstance(x, Iterable): + return tuple(x) + + return tuple(repeat(x, n)) + + +_single = partial(_ntuple, n=1) +_pair = partial(_ntuple, n=2) +_triple = partial(_ntuple, n=3) +_quadruple = partial(_ntuple, n=4) + + +def _fm_ndim1_check(fm_shape: SizeAnyType, fm_order: _Order2d) -> Size2Type: + if len(fm_shape) < 1 or len(fm_shape) > 2: + raise ShapeError(f"expected shape of 1 or 2, but got {len(fm_shape)}.") + + if len(fm_shape) == 1: + channels, l = (1,) + fm_shape + else: + if fm_order == "CL": + channels, l = fm_shape + else: + l, channels = fm_shape + + return channels, l + + +def _fm_ndim2_check(fm_shape: SizeAnyType, fm_order: _Order3d) -> Size3Type: + if len(fm_shape) < 2 or len(fm_shape) > 3: + raise ShapeError(f"expected shape of 2 or 3, but got {len(fm_shape)}.") + + if len(fm_shape) == 2: + channels, h, w = (1,) + fm_shape + else: + if fm_order is "CHW": + channels, h, w = fm_shape + else: + h, w, channels = fm_shape + + return channels, h, w + + +def _conv1d_unroll( + in_shape: Size1Type, + out_shape: Size1Type, + kernel: WeightType, + stride: Size1Type, + # padding: Size1Type, +) -> WeightType: + """Unroll the convolution kernel of 1d convolution into a matrix. + + XXX: The case where the input feature map is in 'LC' order is not considered for the time being. + """ + cout, cin, kl = kernel.shape + il = in_shape[0] + ol = out_shape[0] + + w_unrolled = np.zeros((cin * il, cout * ol), dtype=kernel.dtype) + zeros_image = np.zeros((cin * il, cout, ol), dtype=kernel.dtype) + + for i in range(ol): + for ch_idx in np.ndindex(kernel.shape[:2]): + # [0] -> o_ch, [1] -> i_ch + zeros_image[ + i * stride[0] + ch_idx[1] * il : i * stride[0] + ch_idx[1] * il + kl, + ch_idx[0], + i, + ] = kernel[ch_idx[0], ch_idx[1], :] + + t = zeros_image[:, :, i].T + for o_ch in range(cout): + w_unrolled[:, i + o_ch * ol] = t[o_ch].ravel() + + return w_unrolled + + +def _conv2d_unroll( + in_shape: Size2Type, + out_shape: Size2Type, + kernel: WeightType, + stride: Size2Type, + # padding: Size2Type, +) -> WeightType: + """Unroll the convolution kernel of 2d convolution into a matrix. + + XXX: The case where the input feature map is in 'HWC' order is not considered for the time being. 
+ """ + cout, cin, kh, kw = kernel.shape + + ih, iw = in_shape + oh, ow = out_shape + in_size = ih * iw + out_size = oh * ow + + w_unrolled = np.zeros((cin * in_size, cout * out_size), dtype=kernel.dtype) + zeros_image = np.zeros((cin * ih, iw * cout, out_size), dtype=kernel.dtype) + + for i in range(oh): + for j in range(ow): + for ch_idx in np.ndindex(kernel.shape[:2]): + # [0] -> o_ch, [1] -> i_ch + zeros_image[ + i * stride[0] + + ch_idx[1] * ih : i * stride[0] + + ch_idx[1] * ih + + kh, + j * stride[1] + + ch_idx[0] * iw : j * stride[1] + + ch_idx[0] * iw + + kw, + i * ow + j, + ] = kernel[ch_idx[0], ch_idx[1], :, :] + + t = np.swapaxes( + zeros_image[:, :, i * ow + j].reshape(cin * ih, cout, iw), + 0, + 1, + # .transpose(1, 0, 2) + ) + for o_ch in range(cout): + w_unrolled[:, i * ow + j + o_ch * out_size] = t[o_ch].ravel() + + return w_unrolled + + +def _conv1d_faster( + x_cl: np.ndarray, + out_shape: Size1Type, + kernel: WeightType, + stride: Size1Type, + padding: Size1Type, +) -> SynOutType: + xc, xl = x_cl.shape + + # (O, I, L) + cout, cin, kl = kernel.shape + assert xc == cin + + x_padded = np.pad(x_cl, ((0, 0), (padding[0], padding[0])), mode="constant") + + assert (xl + padding[0] * 2 - kl) // stride[0] + 1 == out_shape[0] + + # kernel: (cout, cin, kl) -> (cout, cin*kl) + col_kernel = kernel.reshape(cout, -1) + + # padded: (cin, xl+2*p[0]-kl) -> (ol, cin*kl) + col_fm = _1d_im2col(x_padded, out_shape[0], kl, stride) + + # out = np.zeros((cout,) + out_shape, dtype=np.int64) + # (ol, cin*kl) * (cout, cin*kl)^T = (ol, cout) + out = col_fm @ col_kernel.T # + self.bias + + # (ol, cout) -> (cout, ol) + return out.astype(np.int32).T + + +def _conv2d_faster( + x_chw: np.ndarray, + out_shape: Size2Type, + kernel: WeightType, + stride: Size2Type, + padding: Size2Type, +) -> SynOutType: + xc, xh, xw = x_chw.shape + + # (O, I, H, W) + cout, cin, kh, kw = kernel.shape + assert xc == cin + + x_padded = np.pad( + x_chw, + ((0, 0), (padding[0], padding[0]), (padding[1], padding[1])), + mode="constant", + ) + + assert (xh + padding[0] * 2 - kh) // stride[0] + 1 == out_shape[0] + assert (xw + padding[1] * 2 - kw) // stride[1] + 1 == out_shape[1] + + # kernel: (cout, cin, kh, kw) -> (cout, cin*kh*kw) + col_kernel = kernel.reshape(cout, -1) + + # padded: (cin, xh+2*p[0]-kh, xw+2*p[1]-kw) -> (oh*ow, cin*kh*kw) + col_fm = _2d_im2col(x_padded, out_shape[0], out_shape[1], kh, kw, stride) + + # out = np.zeros((cout,) + out_shape, dtype=np.int64) + # (oh*ow, cin*kh*kw) * (cout, cin*kh*kw)^T = (oh*ow, cout) + out = col_fm @ col_kernel.T # + self.bias + + # (oh*ow, cout) -> (cout, oh*ow) -> (cout, oh, ow) + out = out.astype(np.int32).T.reshape((cout,) + out_shape) + + return out + + +def _1d_im2col( + x_padded: np.ndarray, ol: int, kl: int, stride: Size1Type +) -> NDArray[np.int64]: + cols = np.zeros((ol, x_padded.shape[0] * kl), dtype=np.int64) + + _, pl = x_padded.shape + + idx = 0 + for i in range(0, pl - kl + 1, stride[0]): + cols[idx] = x_padded[:, i : i + kl].ravel() + idx += 1 + + return cols + + +def _2d_im2col( + x_padded: np.ndarray, oh: int, ow: int, kh: int, kw: int, stride: Size2Type +) -> NDArray[np.int64]: + cols = np.zeros((oh * ow, x_padded.shape[0] * kh * kw), dtype=np.int64) + + _, ph, pw = x_padded.shape + + idx = 0 + for i in range(0, ph - kh + 1, stride[0]): + for j in range(0, pw - kw + 1, stride[1]): + cols[idx] = x_padded[:, i : i + kh, j : j + kw].ravel() + idx += 1 + + return cols diff --git a/paibox/synapses/synapses.py b/paibox/synapses/synapses.py index 56be7a4d..4eda3998 
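Since `_conv1d_unroll` and `_conv1d_faster` above are meant to describe the same zero-padding convolution (one as an unrolled weight matrix acting on the flattened feature map, one computed directly via im2col), a small self-check of that equivalence with arbitrary sizes:

```python
import numpy as np
from paibox.synapses.conv_utils import _conv1d_faster, _conv1d_unroll

rng = np.random.default_rng(0)
cin, cout, il, kl, stride = 2, 3, 8, 3, (1,)
ol = (il - kl) // stride[0] + 1

kernel = rng.integers(-2, 3, size=(cout, cin, kl), dtype=np.int8)   # (O, I, L)
x = rng.integers(0, 2, size=(cin, il), dtype=np.int8)               # spike-like input, (C, L)

w_unrolled = _conv1d_unroll((il,), (ol,), kernel, stride)            # (cin*il, cout*ol)
direct = _conv1d_faster(x, (ol,), kernel, stride, (0,))              # (cout, ol)

assert np.array_equal(x.ravel().astype(np.int32) @ w_unrolled, direct.ravel())
```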
100644 --- a/paibox/synapses/synapses.py +++ b/paibox/synapses/synapses.py @@ -1,189 +1,140 @@ -from typing import ClassVar, Optional, Tuple, Union +import warnings +from typing import Optional, Union import numpy as np -from numpy.typing import NDArray -from paicorelib import HwConfig -from paicorelib import WeightPrecision as WP -from paibox.base import DynamicSys, NeuDyn -from paibox.exceptions import ShapeError +from paibox.base import NeuDyn +from paibox.neuron import Neuron from paibox.projection import InputProj -from paibox.types import DataArrayType, WeightType +from paibox.types import DataArrayType -from .transforms import * +from .base import Conv1dSyn, Conv2dSyn, FullConnSyn +from .conv_utils import _KOrder3d, _KOrder4d, _pair, _single, _Size1Type, _Size2Type +from .transforms import GeneralConnType as GConnType -__all__ = ["NoDecay"] +__all__ = ["FullConn", "Conv1d", "Conv2d"] -RIGISTER_MASTER_KEY_FORMAT = "{0}.output" - - -class Synapses: - """A map connected between neurons of the previous `Node`, \ - and axons of the following `Node`. - - User can use connectivity matrix or COO to represent the \ - connectivity of synapses. - """ +class FullConn(FullConnSyn): def __init__( self, source: Union[NeuDyn, InputProj], dest: NeuDyn, - /, - conn_type: ConnType, + weights: DataArrayType = 1, + *, + conn_type: GConnType = GConnType.MatConn, + name: Optional[str] = None, ) -> None: """ - Args: - - source: the source group of neurons. - - dest: the destination group of neurons. + Arguments: + - source: source neuron(s). + - dest: destination neuron(s). + - weights: weights of the synapses. It can be a scalar or `np.ndarray`. - conn_type: the type of connection. + - name: name of this synapses. Optional. """ - self.source = source - self.dest = dest - self._check(conn_type) - - def _check(self, conn_type: ConnType) -> None: - if conn_type is ConnType.One2One or conn_type is ConnType.BYPASS: - if self.num_in != self.num_out: - raise ShapeError( - f"The number of source & destination neurons must " - f"be equal, but {self.num_in} != {self.num_out}." - ) + super().__init__(source, dest, weights, conn_type, name) - @property - def shape_in(self) -> Tuple[int, ...]: - return self.source.shape_out - @property - def shape_out(self) -> Tuple[int, ...]: - return self.dest.shape_in - - @property - def num_in(self) -> int: - return self.source.num_out - - @property - def num_out(self) -> int: - return self.dest.num_in - - -class SynSys(Synapses, DynamicSys): - CFLAG_ENABLE_WP_OPTIMIZATION: ClassVar[bool] = True - """Compilation flag for weight precision optimization.""" - - def __call__(self, *args, **kwargs) -> NDArray[np.int32]: - return self.update(*args, **kwargs) - - @property - def weights(self) -> WeightType: - raise NotImplementedError - - @property - def weight_precision(self) -> WP: - raise NotImplementedError - - @property - def connectivity(self) -> NDArray[Union[np.bool_, np.int8]]: - raise NotImplementedError +class NoDecay(FullConn): + def __init__( + self, + source: Union[NeuDyn, InputProj], + dest: NeuDyn, + weights: DataArrayType = 1, + *, + conn_type: GConnType = GConnType.MatConn, + name: Optional[str] = None, + ) -> None: + warnings.warn( + "'NoDecay' class will be deprecated in future versions. 
Use 'FullConn' instead.", + DeprecationWarning, + ) - @property - def n_axon_each(self) -> np.ndarray: - return np.sum(self.connectivity, axis=0) + super().__init__(source, dest, weights, conn_type=conn_type, name=name) - @property - def num_axon(self) -> int: - return np.count_nonzero(np.any(self.connectivity, axis=1)) - @property - def num_dendrite(self) -> int: - return np.count_nonzero(np.any(self.connectivity, axis=0)) +class Conv1d(Conv1dSyn): + def __init__( + self, + source: Union[Neuron, InputProj], + dest: Neuron, + kernel: np.ndarray, + *, + stride: _Size1Type = 1, + # padding: _Size1Type = 0, + # fm_order: _Order2d = "CL", + kernel_order: _KOrder3d = "OIL", + name: Optional[str] = None, + ) -> None: + """1d convolution synapses in fully-unrolled format. + Arguments: + - source: source neuron(s). The dimensions need to be expressed explicitly as (C,L). + - dest: destination neuron(s). + - kernel: convolution kernel. Its dimension order is either (O,I,L) or (I,O,L), depending on \ + the argument `kernel_order`. + - stride: the step size of the kernel sliding. It can be a scalar or a tuple of 2 integers. + - kernel_order: dimension order of kernel, (O,I,L) or (I,O,L). (O,I,L) stands for (output \ + channels, input channels, length). + - name: name of the 1d convolution. Optional. + """ + # if fm_order not in ("CL", "LC"): + # raise ValueError(f"feature map order must be 'CL' or 'LC'.") + + if kernel_order not in ("OIL", "IOL"): + raise ValueError(f"kernel order must be 'OIL' or 'IOL'.") + + super().__init__( + source, + dest, + kernel, + _single(stride), + # _single(padding), + # fm_order, + kernel_order, + name=name, + ) -class NoDecay(SynSys): - """Synapses model with no decay.""" +class Conv2d(Conv2dSyn): def __init__( self, - source: Union[NeuDyn, InputProj], - dest: NeuDyn, - weights: DataArrayType = 1, + source: Union[Neuron, InputProj], + dest: Neuron, + kernel: np.ndarray, *, - conn_type: ConnType = ConnType.MatConn, + stride: _Size2Type = 1, + # padding: _Size2Type = 0, + # fm_order: _Order3d = "CHW", + kernel_order: _KOrder4d = "OIHW", name: Optional[str] = None, ) -> None: - """ + """2d convolution synapses in fully-unrolled format. + Arguments: - - source: source neuron(s). + - source: source neuron(s). The dimensions need to be expressed explicitly as (C,H,W). - dest: destination neuron(s). - - weights: weights of the synapses. It can be a scalar or `np.ndarray`. - - conn_type: the type of connection. - - name: name of this synapses. Optional. + - kernel: convolution kernel. Its dimension order is either (O,I,H,W) or (I,O,H,W), \ + depending on the argument `kernel_order`. + - stride: the step size of the kernel sliding. It can be a scalar or a tuple of 2 integers. + - kernel_order: dimension order of kernel, (O,I,H,W) or (I,O,H,W). (O,I,H,W) stands for \ + (output channels, input channels, height, width). + - name: name of the 2d convolution. Optional. 
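A construction sketch for the fully-unrolled 2D convolution described above; shapes are arbitrary, but since padding is fixed to 0 the destination shape must satisfy `out = (in - k) // stride + 1`. The top-level `pb.Conv2d` re-export is assumed (otherwise import it from `paibox.synapses`):

```python
import numpy as np
import paibox as pb

n_in = pb.IF((2, 8, 8), threshold=1)      # source stated explicitly as (C, H, W)
n_out = pb.IF((4, 6, 6), threshold=1)     # 3x3 kernel, stride 1: (8 - 3) // 1 + 1 = 6

kernel = np.random.randint(-1, 2, size=(4, 2, 3, 3), dtype=np.int8)  # (O, I, H, W)
conv = pb.Conv2d(n_in, n_out, kernel, stride=1, kernel_order="OIHW")
```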
""" - super().__init__(source, dest, conn_type) - super(Synapses, self).__init__(name) - - if conn_type is ConnType.One2One: - self.comm = OneToOne(self.num_in, weights) - elif conn_type is ConnType.BYPASS: - self.comm = ByPass(self.num_in) - elif conn_type is ConnType.All2All: - self.comm = AllToAll((self.num_in, self.num_out), weights) - else: # MatConn - if not isinstance(weights, np.ndarray): - raise TypeError( - f"Expected type int, np.integer or np.ndarray, but got type {type(weights)}" - ) - - self.comm = MaskedLinear((self.num_in, self.num_out), weights) - - self.weights.setflags(write=False) - self.set_memory("_synout", np.zeros((self.num_out,), dtype=np.int32)) - - # Register `self` for the destination `NeuDyn`. - dest.register_master(RIGISTER_MASTER_KEY_FORMAT.format(self.name), self) - - def update( - self, spike: Optional[np.ndarray] = None, *args, **kwargs - ) -> NDArray[np.int32]: - # Retrieve the spike at index `timestamp` of the dest neurons - if self.dest.is_working: - if isinstance(self.source, InputProj): - synin = self.source.output.copy() if spike is None else spike - else: - idx = self.dest.timestamp % HwConfig.N_TIMESLOT_MAX - synin = self.source.output[idx].copy() if spike is None else spike - else: - # Retrieve 0 to the dest neurons if it is not working - synin = np.zeros_like(self.source.spike, dtype=np.bool_) - - self._synout = self.comm(synin).astype(np.int32) - return self._synout - - def reset_state(self, *args, **kwargs) -> None: - # TODO Add other initialization methods in the future. - self.reset() # Call reset of `StatusMemory`. - - @property - def output(self) -> NDArray[np.int32]: - return self._synout - - @property - def weights(self): - return self.comm.weights - - @property - def weight_precision(self) -> WP: - return self.comm._get_wp(self.CFLAG_ENABLE_WP_OPTIMIZATION) - - @property - def connectivity(self): - """The connectivity matrix in `np.ndarray` format.""" - return self.comm.connectivity - - def __repr__(self) -> str: - name = self.__class__.__name__ - return ( - f"{name}(name={self.name}, \n" - f'{" " * len(name)} source={self.source}, \n' - f'{" " * len(name)} dest={self.dest})' + # if fm_order not in ("CHW", "HWC"): + # raise ValueError(f"feature map order must be 'CHW or 'HWC'.") + + if kernel_order not in ("OIHW", "IOHW"): + raise ValueError(f"kernel order must be 'OIHW' or 'IOHW'.") + + super().__init__( + source, + dest, + kernel, + _pair(stride), + # _pair(padding), + # fm_order, + kernel_order, + name=name, ) diff --git a/paibox/synapses/transforms.py b/paibox/synapses/transforms.py index 34577e12..95ddd656 100644 --- a/paibox/synapses/transforms.py +++ b/paibox/synapses/transforms.py @@ -1,15 +1,31 @@ +import warnings from enum import Enum, auto, unique -from typing import Tuple, Type, Union import numpy as np -from numpy.typing import NDArray from paicorelib import WeightPrecision as WP -from paibox.exceptions import ShapeError -from paibox.types import DataArrayType, WeightType +from paibox.exceptions import AutoOptimizationWarning, ShapeError +from paibox.types import DataArrayType, IntScalarType, SynOutType, WeightType from paibox.utils import is_shape -__all__ = ["ConnType", "OneToOne", "ByPass", "AllToAll", "MaskedLinear"] +from .conv_utils import ( + Size1Type, + Size2Type, + _conv1d_faster, + _conv1d_unroll, + _conv2d_faster, + _conv2d_unroll, +) + +__all__ = [ + "GeneralConnType", + "OneToOne", + "AllToAll", + "Identity", + "MaskedLinear", + "Conv1dForward", + "Conv2dForward", +] MAX_INT1 = np.int8(1) @@ -22,62 +38,120 @@ 
MIN_INT8 = np.iinfo(np.int8).min -@unique class ConnType(Enum): + """Basic connection enum type.""" + + pass + + +@unique +class GeneralConnType(ConnType): MatConn = auto() """General matrix connection.""" One2One = auto() """One-to-one connection.""" - BYPASS = auto() + Identity = auto() + """Identity connection with scaling factor.""" All2All = auto() """All-to-all connection.""" -def _get_weight_precision(weight: np.ndarray, enable_wp_opt: bool) -> WP: - """Get the actual weight_precision of the weight.""" - _max = np.max(weight, axis=None).astype(np.int32) - _min = np.min(weight, axis=None).astype(np.int32) +def _set_coarse_dtype(raw_w: DataArrayType) -> WeightType: + """Convert raw weights to `np.ndarray` coarsely (without optimization). + + Description: + - For weights of type `bool` or `np.bool_`, set `np.bool_` as the dtype. + - For integer scalar weight, set the dtype according to its value. + - For array weights, set the dtype according to its minimum & maximum values. For weights in the\ + range of int8, the dtype when declared will be followed (i.e. not optimized). + + NOTE: Only when the weight is input in integer scalar form, the weight precision will be optimized \ + automatically. 0/1 is treated as bool_ while others are treated as int8. The weights must not \ + exceed the range of int8. + """ + if isinstance(raw_w, (bool, np.bool_, int, np.integer)): + if raw_w > MAX_INT8 or raw_w < MIN_INT8: + raise ValueError(f"weight out of range int8, got {raw_w}.") + + if raw_w <= MAX_INT1 and raw_w >= MIN_INT1: + _dtype = np.bool_ + else: + _dtype = np.int8 + + return np.asarray(raw_w, dtype=_dtype) + + # Convert list or tuple to np.ndarray + _array = np.asarray(raw_w) + _max = np.max(_array, axis=None) + _min = np.min(_array, axis=None) if _max > MAX_INT8 or _min < MIN_INT8: - raise ValueError(f"Weight precision out of range, [{_min}, {_max}]") + raise ValueError(f"weight out of range int8, got [{_min}, {_max}].") + + if _array.dtype > np.int8: + # XXX If it is automatically optimized to int8, it cannot be converted using the 'same_kind' rule. + # if _max <= MAX_INT1 and _min >= MIN_INT1: + # warnings.warn( + # f"dtype of weight is optimized automatically, {_array.dtype} -> bool.", + # AutoOptimizationWarning, + # ) + # _dtype = np.bool_ + # else: + warnings.warn( + f"dtype of weight is optimized automatically, {_array.dtype} -> int8.", + AutoOptimizationWarning, + ) + _dtype = np.int8 + + elif _array.dtype == np.bool_ or _array.dtype == np.int8: + _dtype = _array.dtype + else: + raise TypeError(f"weights must be bool or int8, but got {_array.dtype}.") - if _max <= MAX_INT1 and _min >= MIN_INT1: - return WP.WEIGHT_WIDTH_1BIT - elif enable_wp_opt: - if _max <= MAX_INT2 and _min >= MIN_INT2: + return _array.astype(_dtype, casting="same_kind") + + +def _get_weight_precision(weight: WeightType, enable_wp_opt: bool) -> WP: + """Get the actual weight_precision of the weight.""" + _max = np.max(weight, axis=None) + _min = np.min(weight, axis=None) + + if enable_wp_opt: + if _max <= MAX_INT1 and _min >= MIN_INT1: + return WP.WEIGHT_WIDTH_1BIT + elif _max <= MAX_INT2 and _min >= MIN_INT2: return WP.WEIGHT_WIDTH_2BIT elif _max <= MAX_INT4 and _min >= MIN_INT4: return WP.WEIGHT_WIDTH_4BIT else: return WP.WEIGHT_WIDTH_8BIT else: - return WP.WEIGHT_WIDTH_8BIT + # If weight precision opt is disabled, return WP1 if dtype is np.bool_ else WP8. 
+ if weight.dtype == np.bool_: + return WP.WEIGHT_WIDTH_1BIT + else: + return WP.WEIGHT_WIDTH_8BIT class Transform: - weights: WeightType - """The actual weights in synapse. Must stored in `np.int8` format.""" + def __init__(self, weights: DataArrayType) -> None: + self.weights = _set_coarse_dtype(weights) - def __call__(self, *args, **kwargs) -> NDArray[np.int32]: + """The actual weights in synapses. Stored in `np.bool_` or `np.int8` format.""" + self.weights.setflags(write=False) + + def __call__(self, *args, **kwargs) -> SynOutType: + """Ensure that in all subclasses, the output dimensions are (M,).""" raise NotImplementedError def _get_wp(self, enable_wp_opt: bool) -> WP: - """Precision of weights.""" return _get_weight_precision(self.weights, enable_wp_opt) @property - def conn_dtype(self) -> Union[Type[np.bool_], Type[np.int8]]: - # The value of `enable_wp_opt` dosen't effect the dtype of `connectivity`. - if self._get_wp(enable_wp_opt=False) is WP.WEIGHT_WIDTH_1BIT: - return np.bool_ - else: - return np.int8 - - @property - def connectivity(self) -> NDArray[Union[np.bool_, np.int8]]: + def connectivity(self) -> WeightType: """The connectivity matrix in `np.ndarray` format.""" raise NotImplementedError @@ -88,136 +162,194 @@ def __init__(self, num: int, weights: DataArrayType) -> None: Arguments: - num: number of neurons. - weights: synaptic weights. The shape must be a scalar or array (num,). - If `weights` is a scalar(ndim = 0), the connectivity matrix will be: - [[x, 0, 0] - [0, x, 0] - [0, 0, x]] - - Or `weights` is an array(ndim = 1), [x, y, z] corresponding to the weights of \ - the post-neurons respectively. The connectivity matrix will be: - [[x, 0, 0] - [0, y, 0] - [0, 0, z]] + - weights is a scalar(ndim = 0), the connectivity matrix will be: + [[x, 0, 0] + [0, x, 0] + [0, 0, x]] + - weights is an array(ndim = 1), [x, y, z] corresponding to the weights \ + of the post-neurons respectively. The connectivity matrix will be: + [[x, 0, 0] + [0, y, 0] + [0, 0, z]] """ self.num = num if isinstance(weights, np.ndarray) and not is_shape(weights, (num,)): - raise ShapeError( - f"Excepted shape is ({num},), but we got shape {weights.shape}" - ) + raise ShapeError(f"expected shape is ({num},), but got {weights.shape}.") - # The ndim of weights = 0 or 1. - self.weights = np.asarray(weights, dtype=np.int8) - - if not self.weights.ndim in (0, 1): - raise ShapeError(f"The ndim of weights must be 0 or 1.") + super().__init__(weights) - def __call__(self, x: np.ndarray, *args, **kwargs) -> NDArray[np.int32]: - output = x * self.weights.copy() + # The ndim of weights = 0 or 1. + if self.weights.ndim not in (0, 1): + raise ShapeError( + f"the ndim of weights must be 0 or 1, but got {self.weights.ndim}." + ) - return output.astype(np.int32) + def __call__(self, x: np.ndarray, *args, **kwargs) -> SynOutType: + # (N,) * (N,) -> (N,) + return x * self.weights.astype(np.int32) @property def connectivity(self): return ( - (self.weights * np.eye(self.num, dtype=np.bool_)).astype(self.conn_dtype) + (self.weights * np.eye(self.num, dtype=np.bool_)) if self.weights.ndim == 0 - else np.diag(self.weights).astype(self.conn_dtype) + else np.diag(self.weights) ) -class ByPass(OneToOne): - def __init__(self, num: int) -> None: +class Identity(OneToOne): + def __init__(self, num: int, scaling_factor: IntScalarType = 1) -> None: """ Arguments: - num: number of neurons. - - NOTE: The weights are always 1. + - scaling_factor: scaling factor. 
""" - super().__init__(num, 1) + super().__init__(num, scaling_factor) class AllToAll(Transform): - def __init__(self, conn_size: Tuple[int, int], weights: DataArrayType) -> None: + def __init__(self, conn_size: Size2Type, weights: DataArrayType) -> None: """ Arguments: - - num_in: number of source neurons. - - num_out: number of destination neurons. + - conn_size: size of connections. - weights: synaptic weights. The shape must be a scalar or a matrix. - If `weights` is a scalar(ndim = 0), the connectivity matrix will be: - [[x, x, x] - [x, x, x] - [x, x, x]] - - Or `weights` is a matrix(ndim = 2), then the connectivity matrix will be: - [[a, b, c] - [d, e, f] - [g, h, i]] + - when weights is a scalar(ndim = 0), the connectivity matrix will be: \ + x * I + - when weights is a matrix(ndim = 2), the connectivity matrix will be: \ + [[a, b, c] + [d, e, f] + [g, h, i]] """ self.conn_size = conn_size if isinstance(weights, np.ndarray) and not is_shape(weights, conn_size): - raise ShapeError( - f"Excepted shape is {conn_size}, but we got shape {weights.shape}" - ) + raise ShapeError(f"expected shape is {conn_size}, but got {weights.shape}.") - self.weights = np.asarray(weights, dtype=np.int8) + super().__init__(weights) - if not self.weights.ndim in (0, 2): - raise ShapeError(f"The ndim of weights must be 0 or 2.") + if self.weights.ndim not in (0, 2): + raise ShapeError( + f"the ndim of weights must be 0 or 2, but got {self.weights.ndim}." + ) - def __call__(self, x: np.ndarray, *args, **kwargs) -> NDArray[np.int32]: + def __call__(self, x: np.ndarray, *args, **kwargs) -> SynOutType: """ - - When weights is a scalar, the output is a scalar. (Risky, DO NOT USE) - - When weights is a matrix, the output is the dot product of `x` & `weights`. + NOTE: + - When weights is a scalar, the output is a scalar (sum * w) & repeated \ + `conn_size[1]` times. + - When weights is a matrix, the output is the dot product of `x` & weights. """ if self.weights.ndim == 0: sum_x = np.sum(x, axis=None, dtype=np.int32) - output = self.weights * np.full((self.conn_size[1],), sum_x, dtype=np.int32) - # Risky - # output = self.weights * sum_x + # (M,) + output = np.full((self.conn_size[1],), self.weights * sum_x, dtype=np.int32) else: - output = x @ self.weights.copy().astype(np.int32) + # (N,) @ (N, M) -> (M,) + output = x @ self.weights.astype(np.int32) - return output.astype(np.int32) + return output @property def connectivity(self): return ( - self.weights.astype(self.conn_dtype) + self.weights if self.weights.ndim == 2 - else (self.weights * np.ones(self.conn_size, dtype=np.bool_)).astype( - self.conn_dtype - ) + else (self.weights * np.ones(self.conn_size, dtype=np.bool_)) ) class MaskedLinear(Transform): + def __init__(self, conn_size: Size2Type, weights: np.ndarray) -> None: + if not is_shape(weights, conn_size): + raise ShapeError(f"expected shape is {conn_size}, but got {weights.shape}.") + + super().__init__(weights) + + def __call__(self, x: np.ndarray, *args, **kwargs) -> SynOutType: + # (N,) @ (N, M) -> (M,) + return x @ self.weights.astype(np.int32) + + @property + def connectivity(self): + return self.weights + + +class Conv1dForward(Transform): def __init__( self, - conn_size: Tuple[int, int], - weights: np.ndarray, + in_shape: Size1Type, + out_shape: Size1Type, + kernel: np.ndarray, + stride: Size1Type, + padding: Size1Type, + # fm_order: _Order2d, ) -> None: - """ - Arguments: - - conn: connector. Only support `MatConn`. - - weights: unmasked weights. 
- """ - self.conn_size = conn_size - - if not is_shape(weights, self.conn_size): - raise ShapeError( - f"Excepted shape is {conn_size}, but we got {weights.shape}" - ) + self.in_shape = in_shape + self.out_shape = out_shape + self.stride = stride + self.padding = padding + # self.fm_order = fm_order + + super().__init__(kernel) + + def __call__(self, x: np.ndarray, *args, **kwargs) -> SynOutType: + cin = self.weights.shape[1] + + # if self.fm_order == "LC": + # # (N,) -> (L, C) -> (C, L) + # _x = x.reshape(self.in_shape + (cin,)).T + # else: + _x = x.reshape((cin,) + self.in_shape) + + return _conv1d_faster( + _x, + self.out_shape, + self.weights, + self.stride, + self.padding, + ) - # Element-wise Multiplication - self.weights = np.asarray(weights, dtype=np.int8) + @property + def connectivity(self): + return _conv1d_unroll(self.in_shape, self.out_shape, self.weights, self.stride) - def __call__(self, x: np.ndarray, *args, **kwargs) -> NDArray[np.int32]: - output = x @ self.weights.copy().astype(np.int32) - return output.astype(np.int32) +class Conv2dForward(Transform): + def __init__( + self, + in_shape: Size2Type, + out_shape: Size2Type, + kernel: np.ndarray, + stride: Size2Type, + padding: Size2Type, + # fm_order: _Order3d, + ) -> None: + self.in_shape = in_shape + self.out_shape = out_shape + self.stride = stride + self.padding = padding + # self.fm_order = fm_order + + super().__init__(kernel) + + def __call__(self, x: np.ndarray, *args, **kwargs) -> SynOutType: + cin = self.weights.shape[1] + + # if self.fm_order == "HWC": + # # (N,) -> (H, W, C) -> (C, H, W) + # _x = x.reshape(self.in_shape + (cin,)).transpose(2, 0, 1) + # else: + _x = x.reshape((cin,) + self.in_shape) + + return _conv2d_faster( + _x, + self.out_shape, + self.weights, + self.stride, + self.padding, + ) @property def connectivity(self): - return self.weights.astype(self.conn_dtype) + return _conv2d_unroll(self.in_shape, self.out_shape, self.weights, self.stride) diff --git a/paibox/tools.py b/paibox/tools.py new file mode 100644 index 00000000..12fa09e6 --- /dev/null +++ b/paibox/tools.py @@ -0,0 +1,13 @@ +_PLIB_BASE_INTRO = """ +To install or update to the latest version, pip install paicorelib. 
+ +To use the development version, pip install --pre paicorelib.""" + +PLIB_INSTALL_INTRO = ( + "\nPAIBox requires paicorelib, please install it.\n" + _PLIB_BASE_INTRO +) + +PLIB_UPDATE_INTRO = ( + "\nThe minimum required version of paicorelib is {0}, but the current version is {1}.\n" + + _PLIB_BASE_INTRO +) diff --git a/paibox/types.py b/paibox/types.py index 794c3392..b7e709de 100644 --- a/paibox/types.py +++ b/paibox/types.py @@ -1,4 +1,3 @@ -import sys from typing import ( AbstractSet, Any, @@ -11,26 +10,24 @@ Optional, Tuple, TypeVar, + Union, ) import numpy as np from numpy.typing import NDArray -if sys.version_info >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - Shape = TypeVar("Shape", int, Tuple[int, ...], List[int]) ArrayType = TypeVar("ArrayType", List[int], Tuple[int, ...], np.ndarray) Scalar = TypeVar("Scalar", int, float, np.generic) -IntScalarType = TypeVar("IntScalarType", int, np.integer) -DataType = TypeVar("DataType", int, np.integer, np.ndarray) +IntScalarType = TypeVar("IntScalarType", int, np.bool_, np.integer) +DataType = TypeVar("DataType", int, np.bool_, np.integer, np.ndarray) DataArrayType = TypeVar( - "DataArrayType", int, np.integer, List[int], Tuple[int, ...], np.ndarray + "DataArrayType", int, np.bool_, np.integer, List[int], Tuple[int, ...], np.ndarray ) -SpikeType: TypeAlias = NDArray[np.bool_] -WeightType: TypeAlias = NDArray[np.int8] # raw int8 weights +SpikeType = NDArray[np.bool_] +SynOutType = NDArray[np.int32] +VoltageType = NDArray[np.int32] +WeightType = NDArray[Union[np.bool_, np.int8]] _T = TypeVar("_T") @@ -80,4 +77,6 @@ def __ior__(self, other: Iterable[Any]) -> "OrderedSet[_T]": return self def __hash__(self) -> NoReturn: - raise TypeError("'OrderedSet' is not hashable (use 'FrozenOrderedSet' instead)") + raise TypeError( + "'OrderedSet' is not hashable (use 'FrozenOrderedSet' instead)." 
+ ) diff --git a/paibox/utils.py b/paibox/utils.py index fdf5f327..eb15cde5 100644 --- a/paibox/utils.py +++ b/paibox/utils.py @@ -24,7 +24,7 @@ def check_elem_unique(obj: Any) -> bool: return True - raise TypeError(f"Unsupported type: {type(obj)}") + raise TypeError(f"unsupported type: {type(obj)}.") def count_unique_elem(obj: Iterable[Any]) -> int: @@ -46,7 +46,7 @@ def check_elem_same(obj: Any) -> bool: if isinstance(obj, dict): return len(set(obj.values())) == 1 - raise TypeError(f"Unsupported type: {type(obj)}") + raise TypeError(f"unsupported type: {type(obj)}.") def is_nested_obj(obj_on_top: Any) -> bool: @@ -60,16 +60,13 @@ def shape2num(shape: Shape) -> int: """Convert a shape to a number""" if isinstance(shape, int): return shape - - if isinstance(shape, (list, tuple)): + else: a = 1 for b in shape: a *= b return a - raise TypeError(f"Unsupported type: {type(shape)}") - def as_shape(shape, min_dim: int = 0) -> Tuple[int, ...]: """Convert a shape to a tuple, like (1,), (10,), or (10, 20)""" @@ -78,7 +75,7 @@ def as_shape(shape, min_dim: int = 0) -> Tuple[int, ...]: elif is_iterable(shape): _shape = tuple(shape) else: - raise ValueError(f"Cannot make a shape for {shape}") + raise ValueError(f"cannot make a shape for {shape}.") if len(_shape) < min_dim: _shape = tuple([1] * (min_dim - len(_shape))) + _shape @@ -88,7 +85,7 @@ def as_shape(shape, min_dim: int = 0) -> Tuple[int, ...]: def is_shape(x, shape: Shape) -> bool: if not is_array_like(x): - raise TypeError(f"Only support an array-like type: {x}") + raise TypeError(f"only support an array-like type: {x}.") _x = np.asarray(x) return _x.shape == as_shape(shape) diff --git a/poetry.lock b/poetry.lock index 0d56ac62..685f01d2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -54,6 +54,67 @@ type = "legacy" url = "https://pypi.tuna.tsinghua.edu.cn/simple" reference = "tsinghua" +[[package]] +name = "filelock" +version = "3.13.3" +description = "A platform independent file lock." 
+optional = true +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"}, + {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "fsspec" +version = "2024.3.1" +description = "File-system specification" +optional = true +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.3.1-py3-none-any.whl", hash = "sha256:918d18d41bf73f0e2b261824baeb1b124bcf771767e3a26425cd7dec3332f512"}, + {file = "fsspec-2024.3.1.tar.gz", hash = "sha256:f39780e282d7d117ffb42bb96992f8a90795e4d0fb0f661a70ca39fe9c43ded9"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + [[package]] name = "iniconfig" version = "2.0.0" @@ -70,6 +131,147 @@ type = "legacy" url = "https://pypi.tuna.tsinghua.edu.cn/simple" reference = "tsinghua" +[[package]] +name = "jinja2" +version = "3.1.3" +description = "A very fast and expressive template engine." +optional = true +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = 
"MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = true +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = true +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", 
hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + [[package]] name = "numpy" version = "1.24.4" @@ -114,13 +316,13 @@ reference = "tsinghua" [[package]] name = "packaging" -version = "23.2" +version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [package.source] @@ -130,13 +332,13 @@ reference = "tsinghua" [[package]] name = "paicorelib" -version = "0.0.12" +version = "0.0.13" description = "Library of PAICORE 2.0 in Python." optional = false python-versions = ">=3.8,<4.0" files = [ - {file = "paicorelib-0.0.12-py3-none-any.whl", hash = "sha256:182de7dd543fdbffb2b59cec61982028210d7cc4a7caf7d932085055ddaf7505"}, - {file = "paicorelib-0.0.12.tar.gz", hash = "sha256:d84910710e3c634d4316c50f071224dfa1763f8438d49b2ce986601add83bf07"}, + {file = "paicorelib-0.0.13-py3-none-any.whl", hash = "sha256:0e6f56b55d59cffe0ced7920396c096e4024aa9774b71fab57a6ec02ebc25f8a"}, + {file = "paicorelib-0.0.13.tar.gz", hash = "sha256:0112a5097ecd3899409f6d51930d88071922a576bd780038f84fa30120d90286"}, ] [package.dependencies] @@ -170,13 +372,13 @@ reference = "tsinghua" [[package]] name = "pydantic" -version = "2.6.3" +version = "2.6.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"}, - {file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"}, + {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, + {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, ] [package.dependencies] @@ -315,6 +517,25 @@ type = "legacy" url = "https://pypi.tuna.tsinghua.edu.cn/simple" reference = "tsinghua" +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + +[package.source] +type = "legacy" +url = 
"https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + [[package]] name = "tomli" version = "2.0.1" @@ -331,6 +552,42 @@ type = "legacy" url = "https://pypi.tuna.tsinghua.edu.cn/simple" reference = "tsinghua" +[[package]] +name = "torch" +version = "2.2.2+cpu" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.2.2+cpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:02c4fac3c964e73f5f49003e0060c697f73b67c10cc23f51c592facb29e1bd53"}, + {file = "torch-2.2.2+cpu-cp310-cp310-win_amd64.whl", hash = "sha256:fc29dda2795dd7220d769c5926b1c50ddac9b4827897e30a10467063691cdf54"}, + {file = "torch-2.2.2+cpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:90089cae572672fb449c8ff1dc1b29daaffa117bf97ede7463dcd2fd1b991e4c"}, + {file = "torch-2.2.2+cpu-cp311-cp311-win_amd64.whl", hash = "sha256:88e63c916e3275fa30a220ee736423a95573b96072ded85e5c0171fd8f37a755"}, + {file = "torch-2.2.2+cpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:431a747b5a880cf8e1fb6d58db6bfafa6768cbec76517d046854537c03323edf"}, + {file = "torch-2.2.2+cpu-cp312-cp312-win_amd64.whl", hash = "sha256:2b0cf041f878607a361116945f82ce2dba4b7a747151da7619a63cb5fccb72df"}, + {file = "torch-2.2.2+cpu-cp38-cp38-linux_x86_64.whl", hash = "sha256:8914ce932168e572a09b4a7e5b0806d279f771dfe58d7e1d8de2291fac4ce69b"}, + {file = "torch-2.2.2+cpu-cp38-cp38-win_amd64.whl", hash = "sha256:4ef2911ffde6d86f643c23aa99f25f1a1df8bee93bf8d0c69cf1b9ba0ca521dc"}, + {file = "torch-2.2.2+cpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:6e3d323a21df22415770e88d39e13591079b9356dabb8b394d1ee29ac6c92481"}, + {file = "torch-2.2.2+cpu-cp39-cp39-win_amd64.whl", hash = "sha256:c2c9e7d5e3c7d58e4b78d6aebfa8002af7cda16cde08d0e3ed00300dc21a8efc"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + +[package.source] +type = "legacy" +url = "https://download.pytorch.org/whl/cpu" +reference = "torch-cpu" + [[package]] name = "typing-extensions" version = "4.10.0" @@ -350,4 +607,4 @@ reference = "tsinghua" [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "e4c053aefe29f7d01e3bda3d670b36a95b8834fa54e999ac239a04d0e2dbaf65" +content-hash = "e9d4e900216862445287836ea2d3790f942e612c2c95a9429b135fc40e802969" diff --git a/pyproject.toml b/pyproject.toml index ca9a6e57..943d1f11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "paibox" -version = "1.0.0a3" +version = "1.0.0b1" description = "New toolbox of PAICORE 2.0." 
authors = ["Ziru Pan "] maintainers = ["Ziru Pan "] @@ -10,22 +10,40 @@ repository = "https://github.com/PAICookers/PAIBox" homepage = "https://github.com/PAICookers/PAIBox" documentation = "https://github.com/PAICookers/PAIBox#readme" keywords = ["PAICORE 2.0", "PAIBox", "Toolbox"] +classifiers = [ + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries", +] packages = [{ include = "paibox" }] # Includes the document include = ["docs", "CHANGELOG.md"] # Excludes the experimental code -exclude = ["paibox/backend/experimental"] +exclude = ["paibox/experimental"] [tool.poetry.dependencies] python = "^3.8" pydantic = "^2.0" -numpy = "^1.23.0" -paicorelib = "0.0.12" +numpy = "^1.24.0" +paicorelib = "0.0.13" + +[tool.poetry.group.test] +optional = true [tool.poetry.group.test.dependencies] pytest = { version = "^7.4.0", python = "^3.8" } +torch = { version = "^2.2.1+cpu", optional = true, source = "torch-cpu" } [tool.pytest.ini_options] diff --git a/tests/backend/__init__.py b/tests/backend/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/backend/conftest.py b/tests/backend/conftest.py index 629ac717..ac11f325 100644 --- a/tests/backend/conftest.py +++ b/tests/backend/conftest.py @@ -3,6 +3,7 @@ import tempfile from functools import partial from pathlib import Path +from typing import Optional import numpy as np import pytest @@ -21,10 +22,17 @@ from paicorelib import WeightPrecision as WP import paibox as pb -from paibox.backend.conf_template import CoreConfig, CorePlacementConfig, NeuronConfig +from paibox.backend.conf_template import ( + CoreConfig, + CorePlacementConfig, + EmptyCorePlacementConfig, + NeuronConfig, +) from paibox.backend.routing import RoutingCluster -from paibox.generic import clear_name_cache +from paibox.exceptions import ResourceError +from paibox.naming import clear_name_cache from paibox.node import NodeList +from tests.conftest import ParametrizedTestData @pytest.fixture(scope="module") @@ -38,6 +46,9 @@ def ensure_dump_dir(): f.unlink() yield p + # Clean up + # for f in p.iterdir(): + # f.unlink() @pytest.fixture @@ -97,21 +108,24 @@ def __init__(self): self.n1 = pb.TonicSpiking(2000, 3, name="n1_1", tick_wait_start=1) self.n2 = pb.TonicSpiking(1200, 3, name="n2_1", tick_wait_start=2) self.n3 = pb.TonicSpiking(800, 4, name="n3_1", tick_wait_start=3) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1_1" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1_1" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2_1" + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2_1" ) - self.s3 = pb.NoDecay( - self.n2, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s3_1" + self.s3 = pb.FullConn( + self.n2, self.n3, conn_type=pb.SynConnType.All2All, name="s3_1" ) class NetForTest2(pb.Network): - """ - INP1 -> S1 -> N1 -> S3 -> N2 - INP2 -> S2 -> N1 + """Test the 
following situations with multiple input nodes: + 1. Two input nodes assigned within one core block. + + Structure: + INP1 -> S1 -> N1 -> S3 -> N2 + INP2 -> S2 -> N1 """ def __init__(self): @@ -120,14 +134,14 @@ def __init__(self): self.inp2 = pb.InputProj(input=1, shape_out=(50,), name="inp2_2") self.n1 = pb.TonicSpiking(30, 3, name="n1_2", tick_wait_start=1) self.n2 = pb.TonicSpiking(20, 3, name="n2_2", tick_wait_start=2) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1_2" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1_2" ) - self.s2 = pb.NoDecay( - self.inp2, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s2_2" + self.s2 = pb.FullConn( + self.inp2, self.n1, conn_type=pb.SynConnType.All2All, name="s2_2" ) - self.s3 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s3_2" + self.s3 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s3_2" ) @@ -145,20 +159,20 @@ def __init__(self): self.n3 = pb.TonicSpiking(400, 4, name="n3", tick_wait_start=4) self.n4 = pb.TonicSpiking(300, 4, name="n4", tick_wait_start=2) - self.s1 = pb.NoDecay( - self.inp, self.n1, conn_type=pb.synapses.ConnType.One2One, name="s1" + self.s1 = pb.FullConn( + self.inp, self.n1, conn_type=pb.SynConnType.One2One, name="s1" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" ) - self.s3 = pb.NoDecay( - self.n2, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s3 = pb.FullConn( + self.n2, self.n3, conn_type=pb.SynConnType.All2All, name="s3" ) - self.s4 = pb.NoDecay( - self.n1, self.n4, conn_type=pb.synapses.ConnType.All2All, name="s4" + self.s4 = pb.FullConn( + self.n1, self.n4, conn_type=pb.SynConnType.All2All, name="s4" ) - self.s5 = pb.NoDecay( - self.n4, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s5" + self.s5 = pb.FullConn( + self.n4, self.n2, conn_type=pb.SynConnType.All2All, name="s5" ) @@ -168,34 +182,47 @@ class NetForTest4(pb.Network): N1 -> S3 -> N3 -> S5 -> N4 """ - def __init__(self): + def __init__(self, large_scale: bool = False): super().__init__() + self.inp1 = pb.InputProj(input=1, shape_out=(400,), name="inp1") self.n1 = pb.TonicSpiking(800, 3, name="n1", tick_wait_start=1) - self.n2 = pb.TonicSpiking(400, 4, name="n2", tick_wait_start=2) - self.n3 = pb.TonicSpiking(400, 4, name="n3", tick_wait_start=2) + + if large_scale: + self.n2 = pb.TonicSpiking(1500, 4, name="n2", tick_wait_start=2) + self.n3 = pb.TonicSpiking(1500, 4, name="n3", tick_wait_start=2) + else: + self.n2 = pb.TonicSpiking(400, 4, name="n2", tick_wait_start=2) + self.n3 = pb.TonicSpiking(400, 4, name="n3", tick_wait_start=2) + self.n4 = pb.TonicSpiking(400, 4, name="n4", tick_wait_start=3) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" ) - self.s3 = pb.NoDecay( - self.n1, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s3 = pb.FullConn( + self.n1, self.n3, conn_type=pb.SynConnType.All2All, name="s3" ) - self.s4 = pb.NoDecay( - self.n2, self.n4, conn_type=pb.synapses.ConnType.One2One, name="s4" + 
self.s4 = pb.FullConn( + self.n2, self.n4, conn_type=pb.SynConnType.All2All, name="s4" ) - self.s5 = pb.NoDecay( - self.n3, self.n4, conn_type=pb.synapses.ConnType.One2One, name="s5" + self.s5 = pb.FullConn( + self.n3, self.n4, conn_type=pb.SynConnType.All2All, name="s5" ) -class Network_with_multi_inodes(pb.Network): - """ - INP1 -> S1 -> N1 -> S2 -> N2 - INP2 -> S3 -> N2 +class Network_with_multi_inodes1(pb.Network): + """Test the following situations with multiple input nodes: + 1. Two input nodes with their own core blocks. + 2. An input node assigned within one core block. + TODO 3. The input node is input to the middle layer. + + Structure: + INP1 -> S1 -> N1 -> S2 -> N2 + -> S3 -> N3 -> S4 -> N4 -> S5 -> N5 + INP2 -> S6 -> N6 -> S7 -> N7 -> S8 -> N5 """ def __init__(self): @@ -204,15 +231,80 @@ def __init__(self): self.inp2 = pb.InputProj(input=1, shape_out=(50,), name="inp2") self.n1 = pb.TonicSpiking(80, 2, name="n1", tick_wait_start=1) self.n2 = pb.TonicSpiking(20, 3, name="n2", tick_wait_start=2) + self.n3 = pb.TonicSpiking(20, 3, name="n3", tick_wait_start=1) + self.n4 = pb.TonicSpiking(20, 3, name="n4", tick_wait_start=2) + self.n5 = pb.TonicSpiking(40, 3, name="n5", tick_wait_start=3) + self.n6 = pb.TonicSpiking(40, 3, name="n6", tick_wait_start=1) + self.n7 = pb.TonicSpiking(40, 3, name="n7", tick_wait_start=2) + + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" + ) + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" + ) + self.s3 = pb.FullConn( + self.inp1, self.n3, conn_type=pb.SynConnType.All2All, name="s3" + ) + self.s4 = pb.FullConn( + self.n3, self.n4, conn_type=pb.SynConnType.All2All, name="s4" + ) + self.s5 = pb.FullConn( + self.n4, self.n5, conn_type=pb.SynConnType.All2All, name="s5" + ) + self.s6 = pb.FullConn( + self.inp2, self.n6, conn_type=pb.SynConnType.All2All, name="s6" + ) + self.s7 = pb.FullConn( + self.n6, self.n7, conn_type=pb.SynConnType.All2All, name="s7" + ) + self.s8 = pb.FullConn( + self.n7, self.n5, conn_type=pb.SynConnType.All2All, name="s8" + ) + - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1" +class Network_with_multi_inodes2(pb.Network): + """Test the following situations with multiple input nodes: + 1. One input node assigned within more than one core block. 
+ + Structure: + INP1 -> S1 -> N1(tws=1) -> S2 -> N2(tws=2) + -> S3 -> N3(tws=2) -> S4 -> N4(tws=3) + -> S5 -> N5(tws=2) -> S6 -> N6(tws=3) + -> S7 -> N7(tws=2/3) + """ + + def __init__(self): + super().__init__() + self.inp1 = pb.InputProj(input=1, shape_out=(40,), name="inp1") + self.n1 = pb.TonicSpiking(80, 2, name="n1", tick_wait_start=1) + self.n2 = pb.TonicSpiking(20, 3, name="n2", tick_wait_start=3) + self.n3 = pb.TonicSpiking(20, 3, name="n3", tick_wait_start=2) + self.n4 = pb.TonicSpiking(20, 3, name="n4", tick_wait_start=3) + self.n5 = pb.TonicSpiking(20, 3, name="n5", tick_wait_start=2) + self.n6 = pb.TonicSpiking(20, 3, name="n6", tick_wait_start=3) + self.n7 = pb.TonicSpiking(20, 3, name="n7", tick_wait_start=2) + + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" + ) + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" + ) + self.s3 = pb.FullConn( + self.inp1, self.n3, conn_type=pb.SynConnType.All2All, name="s3" + ) + self.s4 = pb.FullConn( + self.n3, self.n4, conn_type=pb.SynConnType.All2All, name="s4" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + self.s5 = pb.FullConn( + self.inp1, self.n5, conn_type=pb.SynConnType.All2All, name="s5" ) - self.s3 = pb.NoDecay( - self.inp2, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s6 = pb.FullConn( + self.n5, self.n6, conn_type=pb.SynConnType.All2All, name="s6" + ) + self.s7 = pb.FullConn( + self.n5, self.n7, conn_type=pb.SynConnType.All2All, name="s7" ) @@ -229,20 +321,20 @@ def __init__(self, connect_n4: bool = False): self.n2 = pb.TonicSpiking(20, 3, name="n2", tick_wait_start=2) self.n3 = pb.TonicSpiking(30, 4, name="n3", tick_wait_start=2) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" ) - self.s3 = pb.NoDecay( - self.n1, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s3 = pb.FullConn( + self.n1, self.n3, conn_type=pb.SynConnType.All2All, name="s3" ) if connect_n4: self.n4 = pb.TonicSpiking(50, 4, name="n4", tick_wait_start=3) - self.s4 = pb.NoDecay( - self.n3, self.n4, conn_type=pb.synapses.ConnType.All2All, name="s4" + self.s4 = pb.FullConn( + self.n3, self.n4, conn_type=pb.SynConnType.All2All, name="s4" ) @@ -260,17 +352,17 @@ def __init__(self): self.n2 = pb.TonicSpiking(20, 3, name="n2", tick_wait_start=2) self.n3 = pb.TonicSpiking(30, 3, name="n3", tick_wait_start=2) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" ) - self.s3 = pb.NoDecay( - self.inp2, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s3 = pb.FullConn( + self.inp2, self.n1, conn_type=pb.SynConnType.All2All, name="s3" ) - self.s4 = pb.NoDecay( - self.n1, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s4" + self.s4 = pb.FullConn( + self.n1, self.n3, conn_type=pb.SynConnType.All2All, name="s4" ) @@ -290,10 +382,10 @@ def __init__(self, 
n_onodes: int): for i in range(n_onodes): self.s_list.append( - pb.NoDecay( + pb.FullConn( self.inp1, self.n_list[i], - conn_type=pb.synapses.ConnType.All2All, + conn_type=pb.SynConnType.All2All, name=f"s_{i}", ) ) @@ -317,39 +409,39 @@ def __init__(self, seed: int): self.n2 = pb.TonicSpiking(10, 4, name="n2", tick_wait_start=2) self.n3 = pb.TonicSpiking(10, 4, name="n3", tick_wait_start=2) self.n4 = pb.TonicSpiking(4, 4, name="n4", tick_wait_start=3) - self.s1 = pb.NoDecay( + self.s1 = pb.FullConn( self.inp1, self.n1, weights=rng.randint(-8, 8, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s1", ) - self.s2 = pb.NoDecay( + self.s2 = pb.FullConn( self.n1, self.n2, weights=rng.randint(-8, 8, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s2", ) - self.s3 = pb.NoDecay( + self.s3 = pb.FullConn( self.n1, self.n3, weights=rng.randint(-8, 8, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s3", ) - self.s4 = pb.NoDecay( + self.s4 = pb.FullConn( self.n2, self.n4, weights=rng.randint(-8, 8, size=(10, 4), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s4", ) - self.s5 = pb.NoDecay( + self.s5 = pb.FullConn( self.n3, self.n4, weights=rng.randint(-8, 8, size=(10, 4), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s5", ) @@ -369,39 +461,39 @@ def __init__(self, seed: int) -> None: self.n2 = pb.TonicSpiking(10, 4, name="n2") self.n3 = pb.TonicSpiking(10, 4, name="n3") self.n4 = pb.TonicSpiking(4, 4, name="n4") - self.s1 = pb.NoDecay( + self.s1 = pb.FullConn( self.inp1, self.n1, weights=rng.randint(-128, 128, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s1", ) - self.s2 = pb.NoDecay( + self.s2 = pb.FullConn( self.n1, self.n2, weights=rng.randint(-128, 128, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s2", ) - self.s3 = pb.NoDecay( + self.s3 = pb.FullConn( self.n1, self.n3, weights=rng.randint(-128, 128, size=(10, 10), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s3", ) - self.s4 = pb.NoDecay( + self.s4 = pb.FullConn( self.n2, self.n4, weights=rng.randint(-128, 128, size=(10, 4), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s4", ) - self.s5 = pb.NoDecay( + self.s5 = pb.FullConn( self.n3, self.n4, weights=rng.randint(-128, 128, size=(10, 4), dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, name="s5", ) @@ -414,9 +506,9 @@ def __init__(self): self.inp = pb.InputProj(1, shape_out=(3,)) - n1 = pb.neuron.TonicSpiking((3,), 2) - n2 = pb.neuron.TonicSpiking((3,), 3) - n3 = pb.neuron.TonicSpiking((3,), 4) + n1 = pb.TonicSpiking((3,), 2) + n2 = pb.TonicSpiking((3,), 3) + n3 = pb.TonicSpiking((3,), 4) n_list = pb.NodeList() n_list.append(n1) @@ -424,14 +516,63 @@ def __init__(self): n_list.append(n3) self.n_list = n_list - self.s1 = pb.synapses.NoDecay( - n_list[0], n_list[1], conn_type=pb.synapses.ConnType.All2All + self.s1 = pb.FullConn(n_list[0], n_list[1], conn_type=pb.SynConnType.All2All) + self.s2 = pb.FullConn(n_list[1], n_list[2], conn_type=pb.SynConnType.All2All) + + self.probe1 = pb.Probe(self.n_list[1], "output", 
name="n2_out") + + +class ReusedStruct(pb.Network): + """Reused structure: pre_n -> syn -> post_n, 8-bit""" + + def __init__(self, tws: int = 1, name: Optional[str] = None): + super().__init__(name=name) + + self.pre_n = pb.LIF((10,), 10, 2, tick_wait_start=tws) + self.post_n = pb.LIF((10,), 10, 2, tick_wait_start=tws + 1) + + w = np.random.randint(-128, 127, (10, 10), dtype=np.int8) + self.syn = pb.FullConn( + self.pre_n, self.post_n, conn_type=pb.SynConnType.All2All, weights=w + ) + + +class Nested_Net_level_2(pb.DynSysGroup): + """Level 2 nested network: inp1 -> s1 -> ReusedStruct -> s2 -> ReusedStruct""" + + def __init__(self, tws: int = 1, name: Optional[str] = None): + self.inp1 = pb.InputProj(1, shape_out=(10,)) + subnet1 = ReusedStruct(tws=tws, name="Named_Reused_0") + subnet2 = ReusedStruct(tws=tws + 2, name="Named_Reused_1") + + self.s1 = pb.FullConn( + self.inp1, + subnet1.pre_n, + conn_type=pb.SynConnType.One2One, ) - self.s2 = pb.synapses.NoDecay( - n_list[1], n_list[2], conn_type=pb.synapses.ConnType.All2All + self.s2 = pb.FullConn( + subnet1.post_n, + subnet2.pre_n, + conn_type=pb.SynConnType.One2One, ) - self.probe1 = pb.Probe(self.n_list[1], "output", name="n2_out") + super().__init__(subnet1, subnet2, name=name) + + +class Nested_Net_level_3(pb.DynSysGroup): + """Level 3 nested network: inp1 -> s1 -> Nested_Net_level_2""" + + def __init__(self): + self.inp1 = pb.InputProj(1, shape_out=(10,)) + subnet1 = Nested_Net_level_2(name="Named_Nested_Net_level_2") + + self.s1 = pb.FullConn( + self.inp1, + subnet1["Named_Reused_0"].pre_n, + conn_type=pb.SynConnType.One2One, + ) + + super().__init__(subnet1) @pytest.fixture(scope="class") @@ -445,13 +586,18 @@ def build_example_net2(): @pytest.fixture(scope="class") -def build_multi_inputproj_net(): +def build_multi_inputproj_net1(): return NetForTest2() @pytest.fixture(scope="class") def build_multi_inputproj_net2(): - return Network_with_multi_inodes() + return Network_with_multi_inodes1() + + +@pytest.fixture(scope="class") +def build_multi_inputproj_net3(): + return Network_with_multi_inodes2() @pytest.fixture(scope="class") @@ -464,6 +610,11 @@ def build_example_net4(): return NetForTest4() +@pytest.fixture(scope="class") +def build_example_net4_large_scale(): + return NetForTest4(large_scale=True) + + @pytest.fixture(scope="class") def build_multi_onodes_net(): return Network_with_multi_onodes() @@ -499,6 +650,16 @@ def build_Network_with_container(): return Network_with_container() +@pytest.fixture(scope="class") +def build_Nested_Net_level_2(): + return Nested_Net_level_2() + + +@pytest.fixture(scope="class") +def build_Nested_Net_level_3(): + return Nested_Net_level_3() + + @pytest.fixture(scope="class") def get_mapper() -> pb.Mapper: return pb.Mapper() @@ -540,7 +701,7 @@ def MockNeuronConfig() -> NeuronConfig: offset = random.randint(1, 100) interval = random.randint(1, 2) - neuron = pb.neuron.IF((n,), 3, reset_v=-1) + neuron = pb.IF((n,), 3, reset_v=-1) ns = NeuronSegment(slice(0, 0 + n, 1), offset, interval) axon_coords = [AxonCoord(0, i) for i in range(0, n)] @@ -553,7 +714,7 @@ def MockNeuronConfig() -> NeuronConfig: @pytest.fixture def MockCorePlacementConfig(MockCoreConfigDict, MockNeuronConfig): - neuron = pb.neuron.IF((100,), 3, reset_v=-1) + neuron = pb.IF((100,), 3, reset_v=-1) cpc = CorePlacementConfig.encapsulate( random.randint(1, 200), @@ -565,6 +726,11 @@ def MockCorePlacementConfig(MockCoreConfigDict, MockNeuronConfig): return cpc +@pytest.fixture +def MockEmptyCorePlacementConfig(MockCoreConfigDict): + 
return EmptyCorePlacementConfig.encapsulate(MockCoreConfigDict) + + def packbits_ref(bits: np.ndarray, count: int) -> int: """Pack unsigned bits into a signed integer. @@ -596,3 +762,346 @@ def packbits2(): @pytest.fixture def packbits1(): return partial(packbits_ref, count=1) + + +def n_axon2lcn_ex_proto(n_axon, n_fanin_max) -> LCN_EX: + """Convert #N(of axons) to `LCN_EX` & check. + + NOTE: LCN_EX = log2[ceil(#N/fan-in per dendrite)], where `LCN_1X` = 0. + """ + if n_axon < 1: + raise ValueError(f"the number of axons must be positive, but got {n_axon}.") + + if (lcn := ((n_axon - 1) // n_fanin_max).bit_length()) > LCN_EX.LCN_64X: + raise ResourceError( + f"required LCN extension out of range {LCN_EX.LCN_64X} ({lcn}). " + ) + + return LCN_EX(lcn) + + +class TestData: + + toposort_data = ParametrizedTestData( + args="nodes", + data=[ + ( + { + "inp1": {"n1"}, + "n1": {"n2", "n4"}, + "n2": {"n3"}, + "n3": {}, + "n4": {"n2"}, + } + ), + ( + { + "inp1": {"n1"}, + "n1": {"n2", "n5"}, + "n2": {"n3"}, + "n3": {"n4", "n6"}, + "n4": {}, + "n5": {"n3", "n6"}, + "n6": {"n7"}, + "n7": {"n4"}, + } + ), + ( + { + "inp1": {"n1"}, + "inp2": {"n4"}, + "n1": {"n2"}, + "n2": {"n3"}, + "n3": {}, + "n4": {"n5"}, + "n5": {"n3"}, + } + ), + ( + { + "inp1": {"n1"}, + "n1": {"n2", "n3"}, + "n2": {"n4"}, + "n3": {"n4"}, + "n4": {}, + } + ), + ( + { + "inp1": {"n1"}, + "n1": {"n2"}, + "n2": {"n4"}, + "n3": {"n2"}, # Headless neuron N3 + "n4": {}, + } + ), + ], + ids=[ + "one_input_1", + "one_input_2", + "multi_inputs_1", + "one_input_3", + "headless_neuron_1", + ], + ) + + get_longest_path_data = ParametrizedTestData( + args="edges, expected_path, expected_distance", + data=[ + ( + # inp1 -> n1 -> n4 -> n2 -> n3, 1+1+1+1=4 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n4": 1}, + "n2": {"n3": 1}, + "n3": {}, + "n4": {"n2": 1}, + }, + ["inp1", "n1", "n4", "n2", "n3"], + 4, + ), + ( + # inp1 -> n1 -> n3 -> n4, 1+2+5=8 + { + "inp1": {"n1": 1}, + "n1": {"n2": 3, "n3": 2}, + "n2": {"n4": 2}, + "n3": {"n4": 5}, + "n4": {}, + }, + ["inp1", "n1", "n3", "n4"], + 8, + ), + ( + # inp1 -> n1 -> n2 -> n3, 1+2+1=4 + { + "inp1": {"n1": 1}, + "inp2": {"n2": 1}, + "n1": {"n2": 2}, + "n2": {"n3": 1}, + "n3": {}, + }, + ["inp1", "n1", "n2", "n3"], + 4, + ), + ( + # inp1 -> n1 -> n3 -> n5, 1+2+1=4 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n3": 2}, + "n2": {"n4": 1, "n5": 1}, + "n3": {"n4": 1}, + "n4": {}, + "n5": {}, + }, + ["inp1", "n1", "n3", "n4"], + 4, + ), + ( + # inp2 -> n5 -> n4, 4+1=5 + { + "inp1": {"n1": 1}, + "inp2": {"n5": 4}, + "n1": {"n2": 1, "n3": 1}, + "n2": {"n5": 1}, + "n3": {"n4": 1}, + "n4": {}, + "n5": {"n4": 1}, + }, + ["inp2", "n5", "n4"], + 5, + ), + ( + {"n1": {"n2": 1}, "n2": {}}, + ["n1", "n2"], + 1, + ), + ( + {"n1": {}}, + ["n1"], + 0, + ), + ], + ids=[ + "one_input_1", + "one_input_2", + "multi_inputs_1", + "multi_outputs_1", + "multi_inputs_outputs_1", + "headless_neuron_1", + "headless_neuron_2", + ], + ) + + get_shortest_path_data = ParametrizedTestData( + args="edges, inodes, expected_path, expected_distance", + data=[ + ( + # inp1 -> n1 -> n2 -> n3, 1+1+1=3 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n4": 1}, + "n2": {"n3": 1}, + "n3": {}, + "n4": {"n2": 1}, + }, + ["inp1"], + ["inp1", "n1", "n2", "n3"], + 3, + ), + ( + # inp1 -> n1 -> n2 -> n3 -> n6 -> n7 -> n4 = + # 1+1+3+2+2+3=12 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n5": 5}, + "n2": {"n3": 3}, + "n3": {"n4": 10, "n6": 2}, + "n4": {}, + "n5": {"n3": 5, "n6": 7}, + "n6": {"n7": 2}, + "n7": {"n4": 3}, + }, + ["inp1"], + ["inp1", "n1", "n2", 
"n3", "n6", "n7", "n4"], + 12, + ), + ( + # inp2 -> n2 -> n3, 1+1=2 + { + "inp1": {"n1": 1}, + "inp2": {"n2": 1}, + "n1": {"n2": 2}, + "n2": {"n3": 1}, + "n3": {}, + }, + ["inp1", "inp2"], + ["inp2", "n2", "n3"], + 2, + ), + ( + # inp1 -> n1 -> n2 -> n4, 1+1+1=3 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n3": 2}, + "n2": {"n4": 1}, + "n3": {"n4": 1}, + "n4": {}, + }, + ["inp1"], + ["inp1", "n1", "n2", "n4"], + 3, + ), + ( + # inp1 -> n1 -> n2 -> n4, 1+1+1=3 + { + "inp1": {"n1": 1}, + "n1": {"n2": 1, "n3": 1}, + "n2": {"n4": 2}, + "n3": {"n5": 1}, + "n4": {}, + "n5": {}, + }, + ["inp1"], + ["inp1", "n1", "n3", "n5"], + 3, + ), + ( + {"n1": {"n2": 1}, "n2": {}}, + [], + ["n1", "n2"], + 1, + ), + ( + {"n1": {}}, + [], + ["n1"], + 0, + ), + ], + ids=[ + "one_input_1", + "one_input_2", + "multi_inputs_1", + "multi_outputs_1", + "multi_outputs_2", + "headless_neuron_1", + "headless_neuron_2", + ], + ) + + cflags_weight_bit_opt_data = ParametrizedTestData( + args="range, scalar, dtype, expected_wp_noopt, expected_wp_opt", + data=[ + ( + ((0, 2), (0, 2)), + 1, + (np.bool_, np.bool_), + WP.WEIGHT_WIDTH_1BIT, + WP.WEIGHT_WIDTH_1BIT, + ), + ( + ((0, 2), (0, 2)), + -1, + (np.bool_, np.bool_), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_2BIT, + ), + ( + ((0, 2), (0, 2)), + 1, + (np.bool_, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_1BIT, + ), + ( + ((0, 2), (0, 2)), + -2, + (np.int8, np.bool_), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_2BIT, + ), + ( + ((0, 2), (0, 2)), + 1, + (np.int8, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_1BIT, + ), + ( + ((0, 2), (-2, 2)), + -8, + (np.bool_, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_4BIT, + ), + ( + ((0, 2), (-2, 2)), + 7, + (np.bool_, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_4BIT, + ), + ( + ((0, 2), (-128, 128)), + 127, + (np.bool_, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_8BIT, + ), + ( + ((-2, 2), (-8, 8)), + 7, + (np.int8, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_4BIT, + ), + ( + ((-8, 8), (-8, 8)), + -100, + (np.int8, np.int8), + WP.WEIGHT_WIDTH_8BIT, + WP.WEIGHT_WIDTH_8BIT, + ), + ], + ) diff --git a/tests/backend/test_conf_template.py b/tests/backend/test_conf_template.py index f939adfb..15c956b6 100644 --- a/tests/backend/test_conf_template.py +++ b/tests/backend/test_conf_template.py @@ -23,3 +23,11 @@ def test_CorePlacementConfig_instance( json.dump( MockCorePlacementConfig.__json__(), f, indent=4, ensure_ascii=True ) + + def test_EmptyCorePlacementConfig_instance( + self, ensure_dump_dir, MockEmptyCorePlacementConfig + ): + with open(ensure_dump_dir / "empty_core_placement.json", "w") as f: + json.dump( + MockEmptyCorePlacementConfig.__json__(), f, indent=4, ensure_ascii=True + ) diff --git a/tests/backend/test_graphs.py b/tests/backend/test_graphs.py index d4005bb0..2b7fdcb7 100644 --- a/tests/backend/test_graphs.py +++ b/tests/backend/test_graphs.py @@ -7,56 +7,16 @@ from paibox.backend.graphs import _degree_check from paibox.exceptions import NotSupportedError +from .conftest import TestData + class TestTopoSort: @pytest.mark.parametrize( - "edges", - [ - ( - { - "inp1": {"n1"}, - "n1": {"n2", "n4"}, - "n2": {"n3"}, - "n3": {}, - "n4": {"n2"}, - } - ), - ( - { - "inp1": {"n1"}, - "n1": {"n2", "n5"}, - "n2": {"n3"}, - "n3": {"n4", "n6"}, - "n4": {}, - "n5": {"n3", "n6"}, - "n6": {"n7"}, - "n7": {"n4"}, - } - ), - ( - { - "inp1": {"n1"}, - "inp2": {"n4"}, - "n1": {"n2"}, - "n2": {"n3"}, - "n3": {}, - "n4": {"n5"}, - "n5": {"n3"}, - } - ), - ( - { - "inp1": {"n1"}, - "n1": {"n2", "n3"}, - 
"n2": {"n4"}, - "n3": {"n4"}, - "n4": {}, - } - ), - ], - ids=["one_input_1", "one_input_2", "multi_inputs_1", "one_input_3"], + TestData.toposort_data["args"], + TestData.toposort_data["data"], + ids=TestData.toposort_data["ids"], ) - def test_toposort(self, edges): + def test_toposort(self, nodes): """ Test #1: one input 1 INP1 -> N1 -> N2 -> N3 @@ -77,9 +37,13 @@ def test_toposort(self, edges): Test #4: one input 3 INP1 -> N1 -> N2 -> N4 N1 -> N3 -> N4 + + Test #5: headless neuron 1 + INP1 -> N1 -> N2 -> N4 + N3 -> N2 """ - ordered = toposort(edges) - assert len(ordered) == len(edges) + ordered = toposort(nodes) + assert len(ordered) == len(nodes) @pytest.mark.parametrize( "edges", @@ -424,7 +388,7 @@ def test_group_edges_with_constrs( mapper: pb.Mapper = get_mapper mapper.clear() mapper.build(net) - grouped_edges = mapper.graph.group_edges() + grouped_edges, _ = mapper.graph.group_edges() # In this case, N2 & N3 should be together. pos_n2 = pos_n3 = 0 @@ -444,7 +408,7 @@ def test_group_edges_with_constrs( mapper.clear() mapper.build(net) - grouped_edges = mapper.graph.group_edges() + grouped_edges, _ = mapper.graph.group_edges() pos_n2 = pos_n3 = 0 for i, g in enumerate(grouped_edges): @@ -481,7 +445,7 @@ def get_longest_path_proto( Return: the longest distance in the graph. """ - distances: Dict[NodeName, int] = defaultdict(int) # init value = 0 + distances: Dict[NodeName, int] = {node: 0 for node in ordered_nodes} pred_nodes: Dict[NodeName, NodeName] = defaultdict() for node in ordered_nodes: @@ -508,79 +472,9 @@ def get_longest_path_proto( return path, distance @pytest.mark.parametrize( - "edges, expected_path, expected_distance", - [ - ( - # inp1 -> n1 -> n4 -> n2 -> n3, 1+1+1+1=4 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n4": 1}, - "n2": {"n3": 1}, - "n3": {}, - "n4": {"n2": 1}, - }, - ["inp1", "n1", "n4", "n2", "n3"], - 4, - ), - ( - # inp1 -> n1 -> n3 -> n4, 1+2+5=8 - { - "inp1": {"n1": 1}, - "n1": {"n2": 3, "n3": 2}, - "n2": {"n4": 2}, - "n3": {"n4": 5}, - "n4": {}, - }, - ["inp1", "n1", "n3", "n4"], - 8, - ), - ( - # inp1 -> n1 -> n2 -> n3, 1+2+1=4 - { - "inp1": {"n1": 1}, - "inp2": {"n2": 1}, - "n1": {"n2": 2}, - "n2": {"n3": 1}, - "n3": {}, - }, - ["inp1", "n1", "n2", "n3"], - 4, - ), - ( - # inp1 -> n1 -> n3 -> n5, 1+2+1=4 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n3": 2}, - "n2": {"n4": 1, "n5": 1}, - "n3": {"n4": 1}, - "n4": {}, - "n5": {}, - }, - ["inp1", "n1", "n3", "n4"], - 4, - ), - ( - # inp2 -> n5 -> n4, 4+1=5 - { - "inp1": {"n1": 1}, - "inp2": {"n5": 4}, - "n1": {"n2": 1, "n3": 1}, - "n2": {"n5": 1}, - "n3": {"n4": 1}, - "n4": {}, - "n5": {"n4": 1}, - }, - ["inp2", "n5", "n4"], - 5, - ), - ], - ids=[ - "one_input_1", - "one_input_2", - "multi_inputs_1", - "multi_outputs_1", - "multi_inputs_outputs_1", - ], + TestData.get_longest_path_data["args"], + TestData.get_longest_path_data["data"], + ids=TestData.get_longest_path_data["ids"], ) def test_get_longest_path_proto(self, edges, expected_path, expected_distance): ordered = toposort(edges) @@ -608,8 +502,11 @@ def get_shortest_path_proto( pred_nodes: Dict[NodeName, NodeName] = defaultdict() # Set initial value for all inputs nodes. 
- for inode in input_nodes: - distances[inode] = 0 + if input_nodes: + for inode in input_nodes: + distances[inode] = 0 + else: + distances[ordered_nodes[0]] = 0 for node in ordered_nodes: for neighbor in edges_with_d[node]: @@ -635,86 +532,9 @@ def get_shortest_path_proto( return path, distance @pytest.mark.parametrize( - "edges, inodes, expected_path, expected_distance", - [ - ( - # inp1 -> n1 -> n2 -> n3, 1+1+1=3 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n4": 1}, - "n2": {"n3": 1}, - "n3": {}, - "n4": {"n2": 1}, - }, - ["inp1"], - ["inp1", "n1", "n2", "n3"], - 3, - ), - ( - # inp1 -> n1 -> n2 -> n3 -> n6 -> n7 -> n4 = - # 1+1+3+2+2+3=12 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n5": 5}, - "n2": {"n3": 3}, - "n3": {"n4": 10, "n6": 2}, - "n4": {}, - "n5": {"n3": 5, "n6": 7}, - "n6": {"n7": 2}, - "n7": {"n4": 3}, - }, - ["inp1"], - ["inp1", "n1", "n2", "n3", "n6", "n7", "n4"], - 12, - ), - ( - # inp2 -> n2 -> n3, 1+1=2 - { - "inp1": {"n1": 1}, - "inp2": {"n2": 1}, - "n1": {"n2": 2}, - "n2": {"n3": 1}, - "n3": {}, - }, - ["inp1", "inp2"], - ["inp2", "n2", "n3"], - 2, - ), - ( - # inp1 -> n1 -> n2 -> n4, 1+1+1=3 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n3": 2}, - "n2": {"n4": 1}, - "n3": {"n4": 1}, - "n4": {}, - }, - ["inp1"], - ["inp1", "n1", "n2", "n4"], - 3, - ), - ( - # inp1 -> n1 -> n2 -> n4, 1+1+1=3 - { - "inp1": {"n1": 1}, - "n1": {"n2": 1, "n3": 1}, - "n2": {"n4": 2}, - "n3": {"n5": 1}, - "n4": {}, - "n5": {}, - }, - ["inp1"], - ["inp1", "n1", "n3", "n5"], - 3, - ), - ], - ids=[ - "one_input_1", - "one_input_2", - "multi_inputs_1", - "multi_outputs_1", - "multi_outputs_2", - ], + TestData.get_shortest_path_data["args"], + TestData.get_shortest_path_data["data"], + ids=TestData.get_shortest_path_data["ids"], ) def test_get_shortest_path_proto( self, edges, inodes, expected_path, expected_distance diff --git a/tests/backend/test_mapper.py b/tests/backend/test_mapper.py index 539fb99a..366e7748 100644 --- a/tests/backend/test_mapper.py +++ b/tests/backend/test_mapper.py @@ -6,10 +6,11 @@ import numpy as np import pytest -from paicorelib import Coord, HwConfig, WeightPrecision +from paicorelib import Coord, HwConfig import paibox as pb from paibox.backend.conf_template import CoreConfig, NeuronDest, NeuronDestInfo +from paibox.exceptions import ResourceError from paibox.synapses import SynSys @@ -32,12 +33,11 @@ def default(self, o: Any) -> Any: class TestGraphInfo: - def test_multi_inputproj( - self, get_mapper, ensure_dump_dir, build_multi_inputproj_net + def test_multi_inputproj1( + self, get_mapper, ensure_dump_dir, build_multi_inputproj_net1 ): - net = build_multi_inputproj_net + net = build_multi_inputproj_net1 mapper: pb.Mapper = get_mapper - mapper.build(net) mapper.compile() mapper.export( @@ -54,7 +54,6 @@ def test_multi_inputproj2( ): net = build_multi_inputproj_net2 mapper: pb.Mapper = get_mapper - mapper.build(net) mapper.compile() mapper.export( @@ -66,14 +65,38 @@ def test_multi_inputproj2( assert len(mapper.graph_info["input"]) == 2 + def test_multi_inputproj3( + self, monkeypatch, get_mapper, ensure_dump_dir, build_multi_inputproj_net3 + ): + net = build_multi_inputproj_net3 + mapper: pb.Mapper = get_mapper + mapper.build(net) + mapper.compile() + mapper.export( + fp=ensure_dump_dir, + format="txt", + split_by_coordinate=True, + export_core_params=True, + ) + + assert len(mapper.graph_info["input"]) == 1 + assert len(mapper.core_blocks) == 6 + + monkeypatch.setattr(net.n7, "_tws", 3) # n7.tws: 2 -> 3 + mapper.clear() + mapper.build(net) + mapper.compile() + + 
assert len(mapper.core_blocks) == 5 # n6 & n7 grouped in one core block. + def test_multi_output_nodes( self, get_mapper, ensure_dump_dir, build_multi_onodes_net ): net = build_multi_onodes_net mapper: pb.Mapper = get_mapper - mapper.build(net) mapper.compile() + assert len(mapper.graph_info["output"]) == 2 mapper.export( @@ -88,7 +111,6 @@ def test_multi_output_nodes2( ): net = build_multi_onodes_net2 mapper: pb.Mapper = get_mapper - mapper.build(net) mapper.compile() @@ -106,12 +128,32 @@ def test_multi_inodes_onodes( ): net = build_multi_inodes_onodes mapper: pb.Mapper = get_mapper - mapper.build(net) mapper.compile() + assert len(mapper.graph_info["input"]) == 2 assert len(mapper.graph_info["output"]) == 2 + def test_nested_net_L2_compile(self, get_mapper, build_Nested_Net_level_2): + net = build_Nested_Net_level_2 + mapper: pb.Mapper = get_mapper + mapper.build(net) + mapper.compile() + + assert len(mapper.graph.nodes.keys()) == 5 + assert len(mapper.graph_info["input"]) == 1 + assert len(mapper.graph_info["output"]) == 1 + + def test_nested_net_L3_compile(self, get_mapper, build_Nested_Net_level_3): + net2 = build_Nested_Net_level_3 + mapper: pb.Mapper = get_mapper + mapper.build(net2) + mapper.compile() + + assert len(mapper.graph.edges.keys()) == 5 + assert len(mapper.graph_info["input"]) == 2 + assert len(mapper.graph_info["output"]) == 1 + class TestMapperDebug: def test_build_graph(self, get_mapper, build_example_net1, build_example_net2): @@ -120,22 +162,23 @@ def test_build_graph(self, get_mapper, build_example_net1, build_example_net2): net2 = build_example_net2 mapper: pb.Mapper = get_mapper - mapper.clear() mapper.build(net1, net2) + mapper.compile() - assert mapper.graph.has_built == True + assert len(mapper.graph.nodes.keys()) == 8 + assert len(mapper.graph_info["input"]) == 3 + assert len(mapper.graph_info["output"]) == 2 @pytest.fixture - def test_simple_net(self, get_mapper, build_example_net1): - """Go throught the backend""" + def compile_simple_net(self, get_mapper, build_example_net1): + """Reused fixture.""" net = build_example_net1 mapper: pb.Mapper = get_mapper - mapper.clear() mapper.build(net) mapper.compile() - @pytest.mark.usefixtures("test_simple_net") + @pytest.mark.usefixtures("compile_simple_net") def test_export_config_json(self, ensure_dump_dir, get_mapper): """Export all the configs into json""" mapper: pb.Mapper = get_mapper @@ -149,7 +192,7 @@ def test_export_config_json(self, ensure_dump_dir, get_mapper): ) print() - @pytest.mark.usefixtures("test_simple_net") + @pytest.mark.usefixtures("compile_simple_net") def test_find_neuron(self, get_mapper, build_example_net1): net: pb.Network = build_example_net1 mapper: pb.Mapper = get_mapper @@ -159,7 +202,7 @@ def test_find_neuron(self, get_mapper, build_example_net1): print() - @pytest.mark.usefixtures("test_simple_net") + @pytest.mark.usefixtures("compile_simple_net") def test_find_axon(self, get_mapper, build_example_net1): net: pb.Network = build_example_net1 mapper: pb.Mapper = get_mapper @@ -171,24 +214,59 @@ def test_find_axon(self, get_mapper, build_example_net1): def test_network_with_container(self, get_mapper, build_Network_with_container): net: pb.Network = build_Network_with_container - mapper: pb.Mapper = get_mapper - mapper.clear() mapper.build(net) mapper.compile() - print() + assert len(mapper.graph.nodes.keys()) == 4 + # Input projection is disconnected!
+ assert len(mapper.graph_info["input"]) == 0 + assert len(mapper.graph_info["output"]) == 1 + + def test_network_axons_outrange(self): + class Net(pb.Network): + def __init__(self): + super().__init__() + self.inp = pb.InputProj(1, shape_out=(300, 300)) + self.n1 = pb.IF((300, 300), 1, name="n1") + self.n2 = pb.IF((300,), 1, name="n2") + + self.s1 = pb.FullConn( + self.inp, self.n1, conn_type=pb.SynConnType.All2All + ) + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All + ) + + net = Net() + mapper = pb.Mapper() + mapper.build(net) + + with pytest.raises(ResourceError): + mapper.compile() # 300*300 > 1152*64 class TestMapper_Export: - def test_export_multi_nodes_more_than_32(self, build_Network_with_N_onodes): + def test_export_multi_nodes_more_than_32( + self, build_Network_with_N_onodes, ensure_dump_dir + ): net = build_Network_with_N_onodes mapper = pb.Mapper() mapper.build(net) mapper.compile() + mapper.export(fp=ensure_dump_dir) assert len(mapper.graph_info["output"].keys()) == net.n_onodes + def test_export_empty_cplm(self, build_example_net4_large_scale, ensure_dump_dir): + net = build_example_net4_large_scale + mapper = pb.Mapper() + mapper.build(net) + mapper.compile() + mapper.export(fp=ensure_dump_dir) + + assert len(mapper.routing_groups[1].wasted_coords) == 2 + class TestMapper_Weight4: @pytest.mark.skipif( @@ -357,18 +435,54 @@ def test_grouping_optim_both(self, monkeypatch, build_example_net4): class TestMapper_cflags: - def test_cflags_weight_bit_optimization(self, build_network_with_branches_4bit): - net = build_network_with_branches_4bit + from .conftest import TestData + + @pytest.mark.parametrize( + TestData.cflags_weight_bit_opt_data["args"], + TestData.cflags_weight_bit_opt_data["data"], + ) + def test_cflags_weight_bit_opt( + self, range, scalar, dtype, expected_wp_noopt, expected_wp_opt + ): + # s1, s2, s3 will be grouped in one core block. 
+ class Net(pb.Network): + def __init__(self): + super().__init__() + self.n1 = pb.TonicSpiking(10, 3, name="n1", tick_wait_start=1) + self.n2 = pb.TonicSpiking(10, 4, name="n2", tick_wait_start=2) + self.n3 = pb.TonicSpiking(10, 4, name="n3", tick_wait_start=2) + self.n4 = pb.TonicSpiking(10, 4, name="n4", tick_wait_start=2) + self.s1 = pb.FullConn( + self.n1, + self.n2, + weights=np.random.randint(*range[0], size=(10,), dtype=dtype[0]), + conn_type=pb.SynConnType.One2One, + name="s1", + ) + self.s2 = pb.FullConn( + self.n1, + self.n3, + weights=np.random.randint(*range[1], size=(10, 10), dtype=dtype[1]), + name="s2", + ) + self.s3 = pb.FullConn( + self.n1, + self.n4, + weights=scalar, + conn_type=pb.SynConnType.All2All, + name="s3", + ) + + net = Net() mapper = pb.Mapper() mapper.build(net) - mapper.compile(weight_bit_optimization=True) - assert ( - mapper.core_blocks[0].weight_precision == WeightPrecision.WEIGHT_WIDTH_4BIT - ) + mapper.compile(weight_bit_optimization=False) + assert mapper.core_blocks[0].weight_precision == expected_wp_noopt mapper.clear() mapper.build(net) - mapper.compile(weight_bit_optimization=False) - assert ( - mapper.core_blocks[0].weight_precision == WeightPrecision.WEIGHT_WIDTH_8BIT + mapper.compile(weight_bit_optimization=True) + assert mapper.core_blocks[0].weight_precision == max( + s.weight_precision for s in (net.s1, net.s2, net.s3) ) + assert mapper.core_blocks[0].weight_precision == expected_wp_opt diff --git a/tests/backend/test_placement.py b/tests/backend/test_placement.py index 8e948c58..93aef554 100644 --- a/tests/backend/test_placement.py +++ b/tests/backend/test_placement.py @@ -4,7 +4,7 @@ from paicorelib import WeightPrecision as WP import paibox as pb -from paibox.backend.placement import NeuSeg, n_axon2lcn_ex +from paibox.backend.placement import NeuSeg from paibox.exceptions import ResourceError @@ -28,8 +28,8 @@ def test_get_raw_weight_ref(): w_of_neurons = [w1, w2] - n1 = pb.neuron.LIF((20,), 1) - n2 = pb.neuron.LIF((30,), 1) + n1 = pb.LIF((20,), 1) + n2 = pb.LIF((30,), 1) dest = [n1, n2] @@ -238,7 +238,7 @@ def test_weight_ram_mapping(self, shape, wp, nfold): if nbit > 1: w_unpacked = self._weight_ram_mapping_ref(w_folded, nbit) else: - w_unpacked = w_folded.copy().astype(np.bool_) + w_unpacked = w_folded.astype(np.bool_) w_unpacked.setflags(write=False) @@ -390,8 +390,10 @@ def test_weight_ram_mapping_2bits(self, packbits2): def test_n_axon2lcn_ex(): - lcn_ex = n_axon2lcn_ex(1152 * 18 + 1, 1152) + from .conftest import n_axon2lcn_ex_proto + + lcn_ex = n_axon2lcn_ex_proto(1152 * 18 + 1, 1152) assert lcn_ex == LCN_EX.LCN_32X with pytest.raises(ResourceError): - lcn_ex = n_axon2lcn_ex(1152 * 64 + 1, 1152) + lcn_ex = n_axon2lcn_ex_proto(1152 * 64 + 1, 1152) diff --git a/tests/backend/test_segment_utils.py b/tests/backend/test_segment_utils.py index 289c741e..f4311615 100644 --- a/tests/backend/test_segment_utils.py +++ b/tests/backend/test_segment_utils.py @@ -6,7 +6,7 @@ from paicorelib import WeightPrecision as WP import paibox as pb -from paibox.backend.placement import NeuSeg, n_axon2lcn_ex +from paibox.backend.placement import NeuSeg from paibox.backend.segment_utils import ( aligned_coords, get_axon_segments, @@ -342,14 +342,16 @@ def test_get_neu_segments_both(self, neurons, capacity, wp, lcn_ex, expected): @pytest.mark.parametrize( "axons", [ - [pb.neuron.LIF(600, 2), pb.neuron.LIF(800, 2), pb.neuron.LIF(256, 2)], - [pb.neuron.LIF(384, 3), pb.neuron.LIF(383, 3), pb.neuron.LIF(385, 3)], - [pb.neuron.LIF(1153, 2)], - 
[pb.neuron.LIF(2222, 1), pb.neuron.LIF(2378, 1)], + [pb.LIF(600, 2), pb.LIF(800, 2), pb.LIF(256, 2)], + [pb.LIF(384, 3), pb.LIF(383, 3), pb.LIF(385, 3)], + [pb.LIF(1153, 2)], + [pb.LIF(2222, 1), pb.LIF(2378, 1)], ], ) def test_get_axon_segments(axons): - lcn_ex = n_axon2lcn_ex(sum(axon.num_out for axon in axons), 1152) + from .conftest import n_axon2lcn_ex_proto + + lcn_ex = n_axon2lcn_ex_proto(sum(axon.num_out for axon in axons), 1152) tr_max = 1 << lcn_ex @@ -362,13 +364,15 @@ def test_get_axon_segments(axons): @pytest.mark.parametrize( "axons", [ - [pb.neuron.LIF(1151, 2), pb.neuron.LIF(1153, 2)], - [pb.neuron.LIF(1151 * 2, 2), pb.neuron.LIF(1153 * 2, 2)], + [pb.LIF(1151, 2), pb.LIF(1153, 2)], + [pb.LIF(1151 * 2, 2), pb.LIF(1153 * 2, 2)], ], ) def test_get_axon_segments_boundary(axons): """Illegal boundary cases.""" - lcn_ex = n_axon2lcn_ex(sum(axon.num_out for axon in axons), 1152) + from .conftest import n_axon2lcn_ex_proto + + lcn_ex = n_axon2lcn_ex_proto(sum(axon.num_out for axon in axons), 1152) tr_max = 1 << lcn_ex with pytest.raises(ResourceError): diff --git a/tests/conftest.py b/tests/conftest.py index 3706630b..67a47ae4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,20 @@ +from typing import Any, List, Optional, TypedDict + +import numpy as np import pytest +from typing_extensions import NotRequired import paibox as pb +class ParametrizedTestData(TypedDict): + """Parametrized test data in dictionary format.""" + + args: str + data: List[Any] + ids: NotRequired[List[str]] + + class Input_to_N1(pb.DynSysGroup): """Not nested network inp1 -> n1 -> s1 -> n2, n3 @@ -12,8 +24,8 @@ def __init__(self): super().__init__() self.inp1 = pb.InputProj(1, shape_out=(1,)) self.n1 = pb.TonicSpiking(1, 3, tick_wait_start=2, delay=1) - self.s1 = pb.NoDecay( - self.inp1, self.n1, weights=1, conn_type=pb.synapses.ConnType.One2One + self.s1 = pb.FullConn( + self.inp1, self.n1, weights=1, conn_type=pb.SynConnType.One2One ) self.probe1 = pb.Probe(self.s1, "output", name="s2_out") @@ -33,11 +45,11 @@ def __init__(self): self.n1 = pb.TonicSpiking(1, 2, tick_wait_start=2, delay=3) self.n2 = pb.TonicSpiking(1, 2, tick_wait_start=3) - self.s1 = pb.NoDecay( - self.inp1, self.n1, weights=1, conn_type=pb.synapses.ConnType.One2One + self.s1 = pb.FullConn( + self.inp1, self.n1, weights=1, conn_type=pb.SynConnType.One2One ) - self.s2 = pb.NoDecay( - self.n1, self.n2, weights=1, conn_type=pb.synapses.ConnType.All2All + self.s2 = pb.FullConn( + self.n1, self.n2, weights=1, conn_type=pb.SynConnType.All2All ) self.probe1 = pb.Probe(self.s2, "output", name="s2_out") @@ -59,9 +71,9 @@ def __init__(self): self.inp = pb.InputProj(1, shape_out=(3,)) - n1 = pb.neuron.TonicSpiking((3,), 2) - n2 = pb.neuron.TonicSpiking((3,), 3) - n3 = pb.neuron.TonicSpiking((3,), 4) + n1 = pb.TonicSpiking((3,), 2) + n2 = pb.TonicSpiking((3,), 3) + n3 = pb.TonicSpiking((3,), 4) n_list: pb.NodeList[pb.neuron.Neuron] = pb.NodeList() n_list.append(n1) @@ -69,40 +81,12 @@ def __init__(self): n_list.append(n3) self.n_list = n_list - self.s1 = pb.synapses.NoDecay( - n_list[0], n_list[1], conn_type=pb.synapses.ConnType.All2All - ) - self.s2 = pb.synapses.NoDecay( - n_list[1], n_list[2], conn_type=pb.synapses.ConnType.All2All - ) + self.s1 = pb.FullConn(n_list[0], n_list[1], conn_type=pb.SynConnType.All2All) + self.s2 = pb.FullConn(n_list[1], n_list[2], conn_type=pb.SynConnType.All2All) self.probe1 = pb.Probe(self.n_list[1], "output", name="n2_out") -class MoreInput_Net(pb.DynSysGroup): - """Nested network, level 1. 
- n1 -> s1 -> n2 -> s2 -> n4 - - n3 -> s3 -> n4 - """ - - def __init__(self): - super().__init__() - self.n1 = pb.neuron.TonicSpiking(2, 3) - self.n2 = pb.neuron.TonicSpiking(2, 3) - self.s1 = pb.synapses.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All - ) - self.n3 = pb.neuron.TonicSpiking(2, 4) - self.n4 = pb.neuron.TonicSpiking(2, 3) - self.s2 = pb.synapses.NoDecay( - self.n2, self.n4, conn_type=pb.synapses.ConnType.All2All - ) - self.s3 = pb.synapses.NoDecay( - self.n3, self.n4, conn_type=pb.synapses.ConnType.All2All - ) - - class Network_with_multi_inodes_onodes(pb.Network): """ INP1 -> S1 -> N1 -> S2 -> N2 @@ -117,19 +101,79 @@ def __init__(self): self.n2 = pb.TonicSpiking(20, 3, name="n2", tick_wait_start=2) self.n3 = pb.TonicSpiking(30, 3, name="n3", tick_wait_start=2) - self.s1 = pb.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s1" + self.s1 = pb.FullConn( + self.inp1, self.n1, conn_type=pb.SynConnType.All2All, name="s1" + ) + self.s2 = pb.FullConn( + self.n1, self.n2, conn_type=pb.SynConnType.All2All, name="s2" + ) + self.s3 = pb.FullConn( + self.inp2, self.n1, conn_type=pb.SynConnType.All2All, name="s3" + ) + self.s4 = pb.FullConn( + self.n1, self.n3, conn_type=pb.SynConnType.All2All, name="s4" + ) + + +class Nested_Net_L1(pb.DynSysGroup): + """Level 1 nested network: pre_n -> syn -> post_n""" + + def __init__(self, name: Optional[str] = None): + super().__init__(name=name) + + self.pre_n = pb.LIF((10,), 10) + self.post_n = pb.LIF((10,), 10) + + w = np.random.randint(-128, 127, (10, 10), dtype=np.int8) + self.syn = pb.FullConn( + self.pre_n, self.post_n, conn_type=pb.SynConnType.All2All, weights=w ) - self.s2 = pb.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All, name="s2" + + +class Nested_Net_L2(pb.DynSysGroup): + """Level 2 nested network: inp1 -> s1 -> Nested_Net_L1 -> s2 -> Nested_Net_L1""" + + def __init__(self, name: Optional[str] = None): + self.inp1 = pb.InputProj(1, shape_out=(10,)) + subnet1 = Nested_Net_L1() + subnet2 = Nested_Net_L1(name="Named_SubNet_L1_1") + self.s1 = pb.FullConn( + self.inp1, + subnet1.pre_n, + conn_type=pb.SynConnType.One2One, ) - self.s3 = pb.NoDecay( - self.inp2, self.n1, conn_type=pb.synapses.ConnType.All2All, name="s3" + self.s2 = pb.FullConn( + subnet1.post_n, + subnet2.pre_n, + conn_type=pb.SynConnType.One2One, ) - self.s4 = pb.NoDecay( - self.n1, self.n3, conn_type=pb.synapses.ConnType.All2All, name="s4" + + super().__init__(subnet1, subnet2, name=name) + self.probe1 = pb.Probe(self.inp1, "spike") # won't be discovered in level 3 + + +class Nested_Net_L3(pb.DynSysGroup): + """Level 3 nested network: inp1 -> s1 -> Named_Nested_Net_L2""" + + def __init__(self): + self.inp1 = pb.InputProj(1, shape_out=(10,)) + subnet1 = Nested_Net_L2(name="Named_Nested_Net_L2") + + subnet1_of_subnet1 = subnet1[f"{Nested_Net_L1.__name__}_0"] + + self.s1 = pb.FullConn( + self.inp1, + subnet1_of_subnet1.pre_n, + conn_type=pb.SynConnType.One2One, ) + super().__init__(subnet1) + + self.probe1 = pb.Probe(self.inp1, "spike") + self.probe2 = pb.Probe(subnet1_of_subnet1.pre_n, "spike") + self.probe3 = pb.Probe(subnet1_of_subnet1.pre_n, "voltage") + self.probe4 = pb.Probe(subnet1.s1, "output") + @pytest.fixture(scope="class") def build_Input_to_N1(): @@ -157,5 +201,10 @@ def build_multi_inodes_onodes(): @pytest.fixture(scope="class") -def build_MoreInput_Net(): - return MoreInput_Net() +def build_Nested_Net_L2(): + return Nested_Net_L2() + + +@pytest.fixture(scope="class") +def 
build_Nested_Net_L3(): + return Nested_Net_L3() diff --git a/tests/neuron/conftest.py b/tests/neuron/conftest.py index 047f86b4..d218677d 100644 --- a/tests/neuron/conftest.py +++ b/tests/neuron/conftest.py @@ -44,15 +44,13 @@ class Net1(pb.Network): def __init__(self): super().__init__() self.inp1 = pb.InputProj(fakeout, shape_out=(2,)) - self.n1 = pb.neuron.IF((2,), 3) - self.s1 = pb.synapses.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.One2One - ) + self.n1 = pb.IF((2,), 3) + self.s1 = pb.FullConn(self.inp1, self.n1, conn_type=pb.SynConnType.One2One) - self.probe1 = pb.simulator.Probe(self.inp1, "output") - self.probe2 = pb.simulator.Probe(self.s1, "output") - self.probe3 = pb.simulator.Probe(self.n1, "output") - self.probe4 = pb.simulator.Probe(self.n1, "voltage") + self.probe1 = pb.Probe(self.inp1, "output") + self.probe2 = pb.Probe(self.s1, "output") + self.probe3 = pb.Probe(self.n1, "output") + self.probe4 = pb.Probe(self.n1, "voltage") class Net2(pb.Network): @@ -64,21 +62,21 @@ class Net2(pb.Network): def __init__(self): super().__init__() self.inp1 = pb.InputProj(1, shape_out=(2, 2)) - self.n1 = pb.neuron.LIF((2, 2), 600, reset_v=1, leaky_v=-1) - self.s1 = pb.synapses.NoDecay( - self.inp1, self.n1, weights=127, conn_type=pb.synapses.ConnType.All2All + self.n1 = pb.LIF((2, 2), 600, reset_v=1, leak_v=-1) + self.s1 = pb.FullConn( + self.inp1, self.n1, weights=127, conn_type=pb.SynConnType.All2All ) - self.s2 = pb.synapses.NoDecay( - self.inp1, self.n1, weights=127, conn_type=pb.synapses.ConnType.All2All + self.s2 = pb.FullConn( + self.inp1, self.n1, weights=127, conn_type=pb.SynConnType.All2All ) - self.s3 = pb.synapses.NoDecay( - self.inp1, self.n1, weights=127, conn_type=pb.synapses.ConnType.All2All + self.s3 = pb.FullConn( + self.inp1, self.n1, weights=127, conn_type=pb.SynConnType.All2All ) - self.probe1 = pb.simulator.Probe(self.inp1, "output") - self.probe2 = pb.simulator.Probe(self.s1, "output") - self.probe3 = pb.simulator.Probe(self.n1, "output") - self.probe4 = pb.simulator.Probe(self.n1, "voltage") + self.probe1 = pb.Probe(self.inp1, "output") + self.probe2 = pb.Probe(self.s1, "output") + self.probe3 = pb.Probe(self.n1, "output") + self.probe4 = pb.Probe(self.n1, "voltage") class Net3(pb.Network): @@ -87,33 +85,31 @@ class Net3(pb.Network): def __init__(self): super().__init__() self.inp1 = pb.InputProj(1, shape_out=(2, 2)) - self.n1 = pb.neuron.LIF((2, 2), 100, reset_v=1, leaky_v=-1) - self.n2 = pb.neuron.LIF((2, 2), 100, reset_v=1, leaky_v=-1) - self.s1 = pb.synapses.NoDecay( - self.inp1, self.n1, weights=10, conn_type=pb.synapses.ConnType.All2All + self.n1 = pb.LIF((2, 2), 100, reset_v=1, leak_v=-1) + self.n2 = pb.LIF((2, 2), 100, reset_v=1, leak_v=-1) + self.s1 = pb.FullConn( + self.inp1, self.n1, weights=10, conn_type=pb.SynConnType.All2All ) - self.s2 = pb.synapses.NoDecay( - self.n1, self.n2, weights=10, conn_type=pb.synapses.ConnType.All2All + self.s2 = pb.FullConn( + self.n1, self.n2, weights=10, conn_type=pb.SynConnType.All2All ) - self.probe1 = pb.simulator.Probe(self.n1, "voltage", name="n1_v") - self.probe2 = pb.simulator.Probe(self.n2, "voltage", name="n2_v") - self.probe3 = pb.simulator.Probe(self.n1, "output", name="n1_out") - self.probe4 = pb.simulator.Probe(self.n2, "output", name="n2_out") + self.probe1 = pb.Probe(self.n1, "voltage", name="n1_v") + self.probe2 = pb.Probe(self.n2, "voltage", name="n2_v") + self.probe3 = pb.Probe(self.n1, "output", name="n1_out") + self.probe4 = pb.Probe(self.n2, "output", name="n2_out") class 
TonicSpikingNet(pb.Network): def __init__(self): super().__init__() self.inp1 = pb.InputProj(fakeout, shape_out=(2,)) - self.n1 = pb.neuron.TonicSpiking((2,), 3) - self.s1 = pb.synapses.NoDecay( - self.inp1, self.n1, conn_type=pb.synapses.ConnType.One2One - ) + self.n1 = pb.TonicSpiking((2,), 3) + self.s1 = pb.FullConn(self.inp1, self.n1, conn_type=pb.SynConnType.One2One) - self.probe1 = pb.simulator.Probe(self.s1, "output") - self.probe2 = pb.simulator.Probe(self.n1, "output") - self.probe3 = pb.simulator.Probe(self.n1, "voltage") + self.probe1 = pb.Probe(self.s1, "output") + self.probe2 = pb.Probe(self.n1, "output") + self.probe3 = pb.Probe(self.n1, "voltage") @pytest.fixture(scope="class") diff --git a/tests/neuron/test_neurons.py b/tests/neuron/test_neurons.py index d1a819ef..002e4cc1 100644 --- a/tests/neuron/test_neurons.py +++ b/tests/neuron/test_neurons.py @@ -10,7 +10,7 @@ def test_NeuronParams_instance(ensure_dump_dir): - n1 = pb.neuron.LIF((100,), 3) + n1 = pb.LIF((100,), 3) attrs = NeuronAttrs.model_validate(n1.export_params(), strict=True) @@ -22,13 +22,13 @@ def test_NeuronParams_instance(ensure_dump_dir): def test_NeuronParams_check(): with pytest.raises(ValueError): - n1 = pb.neuron.LIF((100,), threshold=-1) + n1 = pb.LIF((100,), threshold=-1) with pytest.raises(ValueError): - n2 = pb.neuron.IF((100,), 1, delay=-1) + n2 = pb.IF((100,), 1, delay=-1) with pytest.raises(ValueError): - n3 = pb.neuron.IF((100,), 1, delay=1, tick_wait_start=-1, tick_wait_end=100) + n3 = pb.IF((100,), 1, delay=1, tick_wait_start=-1, tick_wait_end=100) class TestNeuronBehavior: @@ -245,23 +245,23 @@ def test_vjt_overflow(self, incoming_v, expected_v, expected_spike): ) def test_neuron_instance(shape): # keep_shape = True - n1 = pb.neuron.TonicSpiking(shape, 5, keep_shape=True) + n1 = pb.TonicSpiking(shape, 5, keep_shape=True) assert n1.shape_in == as_shape(shape) assert n1.shape_out == as_shape(shape) assert len(n1) == shape2num(shape) # keep_shape = False - n2 = pb.neuron.TonicSpiking(shape, 5) + n2 = pb.TonicSpiking(shape, 5) - assert n2.shape_in == as_shape(shape2num(shape)) - assert n2.shape_out == as_shape(shape2num(shape)) + assert n2.shape_in == as_shape(shape) + assert n2.shape_out == as_shape(shape) assert len(n2) == shape2num(shape) def test_neuron_keep_shape(): - n1 = pb.neuron.TonicSpiking((4, 4), 5, keep_shape=True) - n2 = pb.neuron.TonicSpiking((4, 4), 5, keep_shape=False) + n1 = pb.TonicSpiking((4, 4), 5, keep_shape=True) + n2 = pb.TonicSpiking((4, 4), 5, keep_shape=False) assert n1.spike.shape == (16,) assert n1.voltage.shape == (4, 4) @@ -300,7 +300,7 @@ def test_neuron_copy(): class TestNeuronSim: def test_TonicSpiking_simple_sim(self): - n1 = pb.neuron.TonicSpiking(shape=1, fire_step=3) + n1 = pb.TonicSpiking(shape=1, fire_step=3) inp_data = np.ones((10,), dtype=np.bool_) output = np.full((10, 1), 0, dtype=np.bool_) voltage = np.full((10, 1), 0, dtype=np.int32) @@ -312,7 +312,7 @@ def test_TonicSpiking_simple_sim(self): print(output) def test_PhasicSpiking_simple_sim(self): - n1 = pb.neuron.PhasicSpiking(shape=1, time_to_fire=3) + n1 = pb.PhasicSpiking(shape=1, time_to_fire=3) # [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] inp_data = np.concatenate((np.zeros((2,), np.bool_), np.ones((10,), np.bool_))) output = np.full((12, 1), 0, dtype=np.bool_) @@ -325,7 +325,7 @@ def test_PhasicSpiking_simple_sim(self): print(output) def test_IF_simple_sim(self): - n1 = pb.neuron.IF(shape=1, threshold=5, reset_v=2) + n1 = pb.IF(shape=1, threshold=5, reset_v=2) # [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] 
inp_data = np.concatenate((np.zeros((2,), np.bool_), np.ones((10,), np.bool_))) # inp_data = np.ones((12,), dtype=np.bool_) @@ -339,7 +339,7 @@ def test_IF_simple_sim(self): print(output) def test_LIF_simple_sim(self): - n1 = pb.neuron.LIF(shape=1, threshold=5, reset_v=2, leaky_v=1) # leak + 1 + n1 = pb.LIF(shape=1, threshold=5, reset_v=2, leak_v=1) # leak + 1 # [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] inp_data = np.concatenate((np.zeros((2,), np.bool_), np.ones((10,), np.bool_))) # inp_data = np.ones((12,), dtype=np.bool_) @@ -412,3 +412,27 @@ def test_tick_attr_behavior(self, monkeypatch, build_Net3): sim.reset() # TODO can add new test items here + + def test_Always1Neuron_behavior(self): + class Net(pb.Network): + def __init__(self): + super().__init__() + self.inp1 = pb.InputProj(input=None, shape_out=(1,)) + + self.n1 = pb.Always1Neuron(shape=(1,), tick_wait_start=1) + self.s1 = pb.FullConn( + self.inp1, self.n1, weights=0, conn_type=pb.SynConnType.One2One + ) + + self.probe1 = pb.Probe(self.n1, "spike") + + net = Net() + sim = pb.Simulator(net) + + for i in range(20): + net.inp1.input = np.random.randint(0, 2, size=(1,), dtype=np.bool_) + sim.run(1) + + assert np.array_equal( + sim.data[net.probe1], 20 * [np.ones((1,), dtype=np.bool_)] + ) diff --git a/tests/simulator/test_encoder.py b/tests/simulator/test_encoder.py index 8548fde9..971051f0 100644 --- a/tests/simulator/test_encoder.py +++ b/tests/simulator/test_encoder.py @@ -14,7 +14,6 @@ def test_PeriodicEncoder(self): pe = pb.simulator.PeriodicEncoder(spike) out_spike = np.full((20, 3), 0) - for t in range(20): out_spike[t] = pe() @@ -23,16 +22,31 @@ def test_PeriodicEncoder(self): assert np.array_equal(spike, out_spike[10:15]) assert np.array_equal(spike, out_spike[15:20]) + def test_LatencyEncoder(self): + N = 6 + x = np.random.rand(N) + T = 20 + + le1 = pb.simulator.LatencyEncoder(T, "linear") + le2 = pb.simulator.LatencyEncoder(T, "log") + + out_spike1 = np.zeros((T, N), dtype=np.bool_) + out_spike2 = np.zeros((T, N), dtype=np.bool_) + for t in range(T): + out_spike1[t] = le1(x) + out_spike2[t] = le2(x) + + assert 1 + def test_PoissonEncoder(self): seed = 1 rng = np.random.RandomState(seed=seed) x = rng.rand(10, 10).astype(np.float32) + pe = pb.simulator.PoissonEncoder(seed=seed) out_spike = np.full((20, 10, 10), 0) - for t in range(20): out_spike[t] = pe(x=x) - for t in range(1, 20): assert not np.array_equal(out_spike[0], out_spike[t]) diff --git a/tests/simulator/test_simulator.py b/tests/simulator/test_simulator.py index 53aa4ea0..a0461a38 100644 --- a/tests/simulator/test_simulator.py +++ b/tests/simulator/test_simulator.py @@ -14,19 +14,19 @@ def __init__(self, n_neuron: int): self.inp = pb.InputProj(pe, shape_out=(n_neuron,), keep_shape=True) self.n1 = pb.LIF(n_neuron, threshold=3, reset_v=0, tick_wait_start=1) self.n2 = pb.IF(n_neuron, threshold=3, reset_v=1, tick_wait_start=2) - self.s0 = pb.NoDecay( + self.s0 = pb.FullConn( self.inp, self.n1, weights=np.random.randint(-128, 128, size=(n_neuron,), dtype=np.int8), - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) - self.s1 = pb.NoDecay( + self.s1 = pb.FullConn( self.n1, self.n2, weights=np.random.randint( -128, 128, size=(n_neuron, n_neuron), dtype=np.int8 ), - conn_type=pb.synapses.ConnType.All2All, + conn_type=pb.SynConnType.All2All, ) # Probes inside @@ -50,17 +50,17 @@ def __init__(self, n: int): self.inp1 = pb.InputProj(fake_out_1, shape_out=(n,), keep_shape=True) self.inp2 = pb.InputProj(fake_out_2, shape_out=(n,), keep_shape=True) 
self.n1 = pb.LIF(n, threshold=3, reset_v=0, tick_wait_start=1) - self.s0 = pb.NoDecay( + self.s0 = pb.FullConn( self.inp1, self.n1, weights=np.ones((n,), dtype=np.int8), - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) - self.s1 = pb.NoDecay( + self.s1 = pb.FullConn( self.inp2, self.n1, weights=np.ones((n,), dtype=np.int8), - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) # Probes inside @@ -79,17 +79,17 @@ def __init__(self, n: int): self.inp1 = pb.InputProj(pe1, shape_out=(n,), keep_shape=True) self.inp2 = pb.InputProj(pe2, shape_out=(n,), keep_shape=True) self.n1 = pb.LIF(n, threshold=3, reset_v=0, tick_wait_start=1) - self.s0 = pb.NoDecay( + self.s0 = pb.FullConn( self.inp1, self.n1, weights=np.ones((n,), dtype=np.int8), - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) - self.s1 = pb.NoDecay( + self.s1 = pb.FullConn( self.inp2, self.n1, weights=np.ones((n,), dtype=np.int8), - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) # Probes inside @@ -98,50 +98,28 @@ def __init__(self, n: int): self.n1_output = pb.Probe(self.n1, "spike") -class Nested_Net_level_1(pb.DynSysGroup): - """Level 1 nested network: pre_n -> syn -> post_n""" - +class Conv2d_Net(pb.Network): def __init__(self): super().__init__() - self.pre_n = pb.LIF((10,), 2, tick_wait_start=2) - self.post_n = pb.LIF((10,), 10, tick_wait_start=3) - - w = np.ones((10, 10), dtype=np.int8) - self.syn = pb.NoDecay( - self.pre_n, self.post_n, conn_type=pb.synapses.ConnType.All2All, weights=w - ) - - self.probe_in_subnet = pb.Probe(self.pre_n, "spike") + pe1 = pb.simulator.PoissonEncoder() + self.inp1 = pb.InputProj(pe1, shape_out=(8, 24, 24)) + self.n1 = pb.IF((16, 22, 22), threshold=10, reset_v=0, keep_shape=True) -class Nested_Net_level_2(pb.DynSysGroup): - """Level 2 nested network: -> s1 -> Nested_Net_level_1""" + kernel = np.random.randint(-128, 128, size=(8, 16, 3, 3), dtype=np.int8) + stride = 1 - def __init__(self): - self.inp1 = pb.InputProj(None, shape_out=(10,)) - self.n1 = pb.LIF((10,), 2, tick_wait_start=1) - - subnet = Nested_Net_level_1() - - self.s1 = pb.NoDecay( + self.conv1 = pb.Conv2d( self.inp1, self.n1, - conn_type=pb.synapses.ConnType.One2One, - ) - self.s2 = pb.NoDecay( - self.n1, - subnet.pre_n, - conn_type=pb.synapses.ConnType.One2One, + kernel, + stride=stride, + kernel_order="IOHW", ) - self.probe1 = pb.Probe(self.inp1, "spike") - self.probe2 = pb.Probe(self.n1, "spike") - self.probe3 = pb.Probe(self.n1, "voltage") - self.probe4 = pb.Probe(subnet.pre_n, "spike") - self.probe5 = pb.Probe(subnet.post_n, "spike") - - super().__init__(subnet) + self.prob1 = pb.Probe(self.n1, "spike") + self.prob2 = pb.Probe(self.n1, "feature_map") class TestSimulator: @@ -237,14 +215,23 @@ def test_sim_specify_inputs_2(self): sim.reset() - def test_sim_nested_net(self): - net = Nested_Net_level_2() + def test_sim_nested_net(self, build_Nested_Net_L3): + net = build_Nested_Net_L3 sim = pb.Simulator(net, start_time_zero=False) # The probes defined in the subnets cannot be discovered. 
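+ # Nested_Net_L3 itself defines probe1 to probe4; the probe declared inside Nested_Net_L2 is not collected, hence 4 probes in total.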
- assert len(sim.probes) == 5 + assert len(sim.probes) == 4 net.inp1.input = np.ones((10,), dtype=np.int8) sim.run(20) sim.reset() + + def test_sim_conv2d_net(self): + net = Conv2d_Net() + sim = pb.Simulator(net, start_time_zero=False) + + net.inp1.input = np.random.rand(8, 24, 24) + sim.run(10) + + sim.reset() diff --git a/tests/synapses/test_synapses.py b/tests/synapses/test_synapses.py index 92e315af..34644bd3 100644 --- a/tests/synapses/test_synapses.py +++ b/tests/synapses/test_synapses.py @@ -4,16 +4,17 @@ import paibox as pb from paibox.exceptions import ShapeError +from paibox.utils import shape2num def test_SynSys_Attrs(): - n1 = pb.neuron.TonicSpiking(3, 3) - n2 = pb.neuron.TonicSpiking(3, 3) - s1 = pb.synapses.NoDecay( + n1 = pb.TonicSpiking(3, 3) + n2 = pb.TonicSpiking(3, 3) + s1 = pb.FullConn( n1, n2, weights=np.array([[1, 1, 0], [0, 1, 1], [0, 1, 1]], dtype=np.int8), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, ) assert np.array_equal(s1.n_axon_each, np.array([1, 3, 2])) @@ -23,53 +24,51 @@ def test_SynSys_Attrs(): assert s1.weights.dtype == np.int8 -class TestNoDecay: +class TestFullConn: @pytest.mark.parametrize( "n1, n2, scalar_weight, expected_wp", [ ( - pb.neuron.TonicSpiking(10, 3), - pb.neuron.TonicSpiking(10, 3), + pb.TonicSpiking(10, 3), + pb.TonicSpiking(10, 3), 1, WP.WEIGHT_WIDTH_1BIT, ), ( - pb.neuron.TonicSpiking((3, 3), 3), - pb.neuron.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), 4, WP.WEIGHT_WIDTH_4BIT, ), ( - pb.neuron.TonicSpiking((5,), 3), - pb.neuron.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), -1, WP.WEIGHT_WIDTH_2BIT, ), # TODO 3-dimension shape is correct for data flow? ( - pb.neuron.TonicSpiking((10, 2, 3), 3), - pb.neuron.TonicSpiking((10, 2, 3), 3), + pb.TonicSpiking((10, 2, 3), 3), + pb.TonicSpiking((10, 2, 3), 3), 16, WP.WEIGHT_WIDTH_8BIT, ), ( - pb.neuron.TonicSpiking((10, 2), 3), - pb.neuron.TonicSpiking((4, 5), 3), + pb.TonicSpiking((10, 2), 3), + pb.TonicSpiking((4, 5), 3), -100, WP.WEIGHT_WIDTH_8BIT, ), ( - pb.neuron.TonicSpiking(10, 3), - pb.neuron.TonicSpiking((2, 5), 3), + pb.TonicSpiking(10, 3), + pb.TonicSpiking((2, 5), 3), 7, WP.WEIGHT_WIDTH_4BIT, ), ], ) - def test_NoDecay_One2One_scalar(self, n1, n2, scalar_weight, expected_wp): - s1 = pb.synapses.NoDecay( - n1, n2, scalar_weight, conn_type=pb.synapses.ConnType.One2One - ) + def test_FullConn_One2One_scalar(self, n1, n2, scalar_weight, expected_wp): + s1 = pb.FullConn(n1, n2, scalar_weight, conn_type=pb.SynConnType.One2One) assert np.array_equal(s1.weights, scalar_weight) assert (s1.num_in, s1.num_out) == (n1.num_out, n2.num_in) @@ -77,40 +76,45 @@ def test_NoDecay_One2One_scalar(self, n1, n2, scalar_weight, expected_wp): s1.connectivity, scalar_weight * np.eye(n1.num_out, n2.num_in, dtype=np.int8), ) + assert ( + s1.connectivity.dtype == np.int8 + if expected_wp > WP.WEIGHT_WIDTH_1BIT + else np.bool_ + ) assert s1.weight_precision is expected_wp @pytest.mark.parametrize( "n1, n2", [ ( - pb.neuron.TonicSpiking(10, 3), - pb.neuron.TonicSpiking(100, 4), + pb.TonicSpiking(10, 3), + pb.TonicSpiking(100, 4), ), ( - pb.neuron.TonicSpiking((10, 10), 3), - pb.neuron.TonicSpiking((5, 10), 4), + pb.TonicSpiking((10, 10), 3), + pb.TonicSpiking((5, 10), 4), ), ( - pb.neuron.IF((10,), 3), - pb.neuron.TonicSpiking((5,), 4), + pb.IF((10,), 3), + pb.TonicSpiking((5,), 4), ), ( - pb.neuron.TonicSpiking(10, 3), - pb.neuron.TonicSpiking((5, 10), 4), + pb.TonicSpiking(10, 3), + pb.TonicSpiking((5, 
10), 4), ), ], ) - def test_NoDecay_One2One_scalar_illegal(self, n1, n2): + def test_FullConn_One2One_scalar_illegal(self, n1, n2): with pytest.raises(ShapeError): - s1 = pb.synapses.NoDecay(n1, n2, conn_type=pb.synapses.ConnType.One2One) + s1 = pb.FullConn(n1, n2, conn_type=pb.SynConnType.One2One) - def test_NoDecay_One2One_matrix(self): + def test_FullConn_One2One_matrix(self): weight = np.array([2, 3, 4], np.int8) - s1 = pb.synapses.NoDecay( - pb.neuron.TonicSpiking((3,), 3), - pb.neuron.TonicSpiking((3,), 3), + s1 = pb.FullConn( + pb.TonicSpiking((3,), 3), + pb.TonicSpiking((3,), 3), weight, - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) assert (s1.num_in, s1.num_out) == (3, 3) @@ -118,14 +122,15 @@ def test_NoDecay_One2One_matrix(self): assert np.array_equal( s1.connectivity, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 4]], dtype=np.int8) ) + assert s1.connectivity.dtype == np.int8 assert s1.weight_precision is WP.WEIGHT_WIDTH_4BIT weight = np.array([1, 0, 1, 0], np.int8) - s2 = pb.synapses.NoDecay( - pb.neuron.TonicSpiking((2, 2), 3), - pb.neuron.TonicSpiking((2, 2), 3), + s2 = pb.FullConn( + pb.TonicSpiking((2, 2), 3), + pb.TonicSpiking((2, 2), 3), weight, - conn_type=pb.synapses.ConnType.One2One, + conn_type=pb.SynConnType.One2One, ) assert (s2.num_in, s2.num_out) == (4, 4) @@ -136,125 +141,227 @@ def test_NoDecay_One2One_matrix(self): [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.bool_ ), ) + assert s2.connectivity.dtype == np.int8 assert s2.weight_precision is WP.WEIGHT_WIDTH_1BIT @pytest.mark.parametrize( "n1, n2", [ - (pb.neuron.TonicSpiking(10, 3), pb.neuron.TonicSpiking(10, 3)), + (pb.TonicSpiking(10, 3), pb.TonicSpiking(10, 3)), ( - pb.neuron.TonicSpiking((3, 3), 3), - pb.neuron.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), ), ( - pb.neuron.TonicSpiking((5,), 3), - pb.neuron.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), ), ( - pb.neuron.TonicSpiking(10, 3), - pb.neuron.TonicSpiking(100, 3), + pb.TonicSpiking(10, 3), + pb.TonicSpiking(100, 3), ), ( - pb.neuron.TonicSpiking((10, 10), 3), - pb.neuron.TonicSpiking((5, 5), 3), + pb.TonicSpiking((10, 10), 3), + pb.TonicSpiking((5, 5), 3), ), ], ) - def test_NoDecay_All2All(self, n1, n2): - s1 = pb.synapses.NoDecay(n1, n2, conn_type=pb.synapses.ConnType.All2All) + def test_FullConn_All2All(self, n1, n2): + s1 = pb.FullConn(n1, n2, conn_type=pb.SynConnType.All2All) assert (s1.num_in, s1.num_out) == (n1.num_out, n2.num_in) + assert s1.connectivity.dtype == np.bool_ assert np.array_equal(s1.weights, 1) assert np.array_equal(s1.connectivity, np.ones((n1.num_out, n2.num_in))) - def test_NoDecay_All2All_with_weights(self): - n1 = pb.neuron.TonicSpiking(3, 3) - n2 = pb.neuron.TonicSpiking(3, 3) + def test_FullConn_All2All_with_weights(self): + n1 = pb.TonicSpiking(3, 3) + n2 = pb.TonicSpiking(3, 3) """1. Single weight.""" weight = 2 - s1 = pb.synapses.NoDecay(n1, n2, weight, conn_type=pb.synapses.ConnType.All2All) + s1 = pb.FullConn(n1, n2, weight, conn_type=pb.SynConnType.All2All) assert np.array_equal(s1.weights, weight) + assert s1.connectivity.dtype == np.int8 assert s1.weight_precision is WP.WEIGHT_WIDTH_4BIT """2. 
Weights matrix.""" weight = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - s2 = pb.synapses.NoDecay(n1, n2, weight, conn_type=pb.synapses.ConnType.All2All) + s2 = pb.FullConn(n1, n2, weight, conn_type=pb.SynConnType.All2All) + assert s2.connectivity.dtype == np.int8 assert np.array_equal(s2.weights, weight) assert np.array_equal(s2.connectivity, weight) # Wrong shape with pytest.raises(ShapeError): - s3 = pb.synapses.NoDecay( - n1, n2, np.array([1, 2, 3]), conn_type=pb.synapses.ConnType.All2All + s3 = pb.FullConn( + n1, n2, np.array([1, 2, 3]), conn_type=pb.SynConnType.All2All ) with pytest.raises(ShapeError): - s3 = pb.synapses.NoDecay( + s3 = pb.FullConn( n1, n2, np.array([[1, 2, 3], [4, 5, 6]]), - conn_type=pb.synapses.ConnType.All2All, + conn_type=pb.SynConnType.All2All, ) with pytest.raises(ShapeError): - s3 = pb.synapses.NoDecay( + s3 = pb.FullConn( n1, n2, np.array([[1, 2], [4, 5], [6, 7]]), - conn_type=pb.synapses.ConnType.All2All, + conn_type=pb.SynConnType.All2All, ) with pytest.raises(ShapeError): - s3 = pb.synapses.NoDecay( + s3 = pb.FullConn( n1, n2, np.array([[1, 2, 3], [4, 5, 6], [6, 7, 8], [1, 2, 3]]), - conn_type=pb.synapses.ConnType.All2All, + conn_type=pb.SynConnType.All2All, ) @pytest.mark.parametrize( "n1, n2", [ - (pb.neuron.TonicSpiking(10, 3), pb.neuron.TonicSpiking(10, 3)), + (pb.TonicSpiking(10, 3), pb.TonicSpiking(10, 3)), ( - pb.neuron.TonicSpiking((3, 3), 3), - pb.neuron.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), + pb.TonicSpiking((3, 3), 3), ), ( - pb.neuron.TonicSpiking((5,), 3), - pb.neuron.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), + pb.TonicSpiking((5,), 3), ), ], ) - def test_NoDecay_MatConn(self, n1, n2): + def test_FullConn_MatConn(self, n1, n2): weight = np.random.randint( -128, 128, size=(n1.num_out, n2.num_in), dtype=np.int8 ) - s = pb.synapses.NoDecay(n1, n2, weight, conn_type=pb.synapses.ConnType.MatConn) + s = pb.FullConn(n1, n2, weight, conn_type=pb.SynConnType.MatConn) - assert np.array_equal(s.weights, weight) assert (s.num_in, s.num_out) == (n1.num_out, n2.num_in) + assert s.connectivity.dtype == np.int8 + assert np.array_equal(s.weights, weight) assert np.array_equal(s.connectivity, weight) # Wrong weight type with pytest.raises(TypeError): - s = pb.synapses.NoDecay(n1, n2, 1, conn_type=pb.synapses.ConnType.MatConn) + s = pb.FullConn(n1, n2, 1, conn_type=pb.SynConnType.MatConn) # Wrong shape with pytest.raises(ShapeError): - s = pb.synapses.NoDecay( - n1, n2, np.array([1, 2, 3]), conn_type=pb.synapses.ConnType.MatConn + s = pb.FullConn( + n1, n2, np.array([1, 2, 3]), conn_type=pb.SynConnType.MatConn ) # Wrong shape with pytest.raises(ShapeError): - s = pb.synapses.NoDecay( + s = pb.FullConn( n1, n2, np.array([[1, 2, 3], [4, 5, 6]]), - conn_type=pb.synapses.ConnType.MatConn, + conn_type=pb.SynConnType.MatConn, ) + + +class TestConv2d: + def test_Conv1d_instance(self): + in_shape = (32,) + kernel_size = (5,) + stride = 2 + out_shape = ((32 - 5) // 2 + 1,) + in_channels = 8 + out_channels = 16 + korder = "IOL" + + n1 = pb.IF((in_channels,) + in_shape, 3) # CL + n2 = pb.IF((out_channels,) + out_shape, 3) + + weight = np.random.randint( + -128, 128, size=(in_channels, out_channels) + kernel_size, dtype=np.int8 + ) + s1 = pb.Conv1d(n1, n2, weight, stride=stride, kernel_order=korder) + + assert s1.num_in == in_channels * shape2num(in_shape) + assert s1.connectivity.dtype == np.int8 + assert s1.connectivity.shape == ( + in_channels * shape2num(in_shape), + out_channels * shape2num(out_shape), + ) + + def test_Conv2d_instance(self): 
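+ # With a 5x5 kernel and stride 2 on a 32x32 input, each output channel is a (32 - 5) // 2 + 1 = 14, i.e. 14x14, feature map.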
+ in_shape = (32, 32) + kernel_size = (5, 5) + stride = 2 + out_shape = ((32 - 5) // 2 + 1, (32 - 5) // 2 + 1) + in_channels = 8 + out_channels = 16 + korder = "IOHW" + + n1 = pb.IF((in_channels,) + in_shape, 3) # CHW + n2 = pb.IF((out_channels,) + out_shape, 3) + + weight = np.random.randint( + -8, 8, size=(in_channels, out_channels) + kernel_size, dtype=np.int32 + ) + s1 = pb.Conv2d(n1, n2, weight, stride=stride, kernel_order=korder) + + assert s1.num_in == in_channels * shape2num(in_shape) + assert s1.connectivity.dtype == np.int8 + assert s1.connectivity.shape == ( + in_channels * shape2num(in_shape), + out_channels * shape2num(out_shape), + ) + + def test_Conv1d_inchannel_omitted(self): + in_shape = (32,) + kernel_size = (5,) + stride = 2 + out_shape = ((32 - 5) // 2 + 1,) + in_channels = 1 # omit it + out_channels = 4 + korder = "IOL" + + n1 = pb.IF(in_shape, 3) # L, (in_channels=1) + n2 = pb.IF((out_channels,) + out_shape, 3) + + weight = np.random.randint( + -128, 128, size=(in_channels, out_channels) + kernel_size, dtype=np.int64 + ) + s1 = pb.Conv1d(n1, n2, weight, stride=stride, kernel_order=korder) + + assert s1.num_in == in_channels * shape2num(in_shape) + assert s1.connectivity.dtype == np.int8 + assert s1.connectivity.shape == ( + in_channels * shape2num(in_shape), + out_channels * shape2num(out_shape), + ) + + def test_Conv2d_inchannel_omitted(self): + in_shape = (32, 32) + kernel_size = (5, 5) + stride = 2 + out_shape = ((32 - 5) // 2 + 1, (32 - 5) // 2 + 1) + in_channels = 1 # omit it + out_channels = 4 + korder = "IOHW" + + n1 = pb.IF(in_shape, 3) # HW, (in_channels=1) + n2 = pb.IF((out_channels,) + out_shape, 3) + + weight = np.random.randint( + -128, 128, size=(in_channels, out_channels) + kernel_size, dtype=np.int8 + ) + s1 = pb.Conv2d(n1, n2, weight, stride=stride, kernel_order=korder) + + assert s1.num_in == in_channels * shape2num(in_shape) + assert s1.connectivity.shape == ( + in_channels * shape2num(in_shape), + out_channels * shape2num(out_shape), + ) diff --git a/tests/synapses/test_transforms.py b/tests/synapses/test_transforms.py index 59ead01a..3bfab957 100644 --- a/tests/synapses/test_transforms.py +++ b/tests/synapses/test_transforms.py @@ -1,187 +1,474 @@ +from typing import Tuple + import numpy as np import pytest -from paibox.synapses.transforms import AllToAll, MaskedLinear, OneToOne - - -@pytest.mark.parametrize( - "weight", - [ - (np.array([1, 2, 3], dtype=np.int8)), - (np.array([1, 0, 1], dtype=np.bool_)), - (np.array([1, 0, 1], dtype=np.int8)), - (10), - (np.int8(-1)), - (np.array([127, 0, 1], dtype=np.int8)), - (np.array([-128, 1, 127], dtype=np.int8)), - ], - ids=[ - "array_1", - "array_2", - "array_3", - "scalar_pos", - "scalar_neg", - "array_int8_1", - "array_int8_2", - ], -) -def test_OneToOne_dtype(weight): - num = 3 - f = OneToOne(num, weight) - x = np.array([1, 0, 1], dtype=np.bool_) - y = f(x) - expected = x * weight - - assert y.dtype == np.int32 - assert y.shape == (num,) - assert np.array_equal(y, expected) - assert f.connectivity.shape == (num, num) - - -def test_OneToOne(): - weight = np.array([1, 2, 3, 4], dtype=np.int8) - f = OneToOne(4, weight) - assert f.connectivity.shape == (4, 4) - - # The last spike is an array. - x1 = np.array([1, 2, 3, 4], dtype=np.int8) - y = f(x1) - assert y.shape == (4,) - - # The last spike is a scalar. 
- x2 = np.array(2, dtype=np.int8) - y = f(x2) - assert y.shape == (4,) - - -@pytest.mark.parametrize( - "weight, expected_dtype", - [ - (1, np.bool_), - (-1, np.int8), - (10, np.int8), - (-100, np.int8), - (-128, np.int8), - (127, np.int8), - ], - ids=[ - "scalar_1", - "scalar_-1", - "scalar_10", - "scalar_-100", - "scalar_-128", - "scalar_-127", - ], -) -def test_AllToAll_weight_scalar(weight, expected_dtype): - """Test `AllToAll` when weight is a scalar""" - - num_in, num_out = 10, 20 - x = np.random.randint(2, size=(10,)) - f = AllToAll((num_in, num_out), weight) - y = f(x) - expected = np.full((num_out,), np.sum(x, axis=None), dtype=np.int32) * weight - - assert f.conn_dtype == expected_dtype - assert y.dtype == np.int32 - assert y.shape == (num_out,) - assert y.ndim == 1 - assert np.array_equal(y, expected) - assert f.connectivity.shape == (num_in, num_out) - - -@pytest.mark.parametrize( - "shape, x, weights, expected_dtype", - [ - ( - (3, 4), - np.random.randint(2, size=(3,), dtype=np.bool_), - np.random.randint(2, size=(3, 4), dtype=np.bool_), - np.bool_, - ), - ( - (10, 20), - np.random.randint(2, size=(10,), dtype=np.bool_), - np.random.randint(127, size=(10, 20), dtype=np.int8), - np.int8, - ), - ( - (20, 10), - np.random.randint(2, size=(20,), dtype=np.bool_), - np.random.randint(2, size=(20, 10), dtype=np.int8), - np.bool_, - ), - ( - (2, 2), - np.array([1, 1], dtype=np.bool_), - np.array([[1, 2], [3, 4]], dtype=np.int8), - np.int8, - ), - ( - (2, 2), - np.array([1, 1], dtype=np.bool_), - np.array([[127, 0], [3, -128]], dtype=np.int8), - np.int8, - ), - ], - ids=[ - "weights_bool_1", - "weights_int8_1", - "weights_int8_2", - "weights_int8_3", - "weights_int8_4", - ], -) -def test_AllToAll_array(shape, x, weights, expected_dtype): - """Test `AllToAll` when weights is an array""" - - f = AllToAll(shape, weights) - y = f(x) - expected = x @ weights.copy().astype(np.int32) - - assert f.conn_dtype == expected_dtype - assert np.array_equal(y, expected) - assert f.connectivity.shape == shape - - -@pytest.mark.parametrize( - "shape, x, weights, expected_dtype", - [ - ( - (3, 4), - np.array([1, 1, 1], dtype=np.bool_), - np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.int8), - np.int8, - ), - ( - (10, 20), - np.random.randint(2, size=(10,), dtype=np.bool_), - np.random.randint(-10, 10, size=(10, 20), dtype=np.int8), - np.int8, - ), - ( - (20, 10), - np.ones((20,), dtype=np.bool_), - np.random.randint(2, size=(20, 10), dtype=np.int8), - np.bool_, - ), - ( - (2, 2), - np.array([1, 1], dtype=np.bool_), - np.array([[127, 0], [3, -128]], dtype=np.int8), - np.int8, - ), - ], - ids=["weights_int8_1", "weights_int8_2", "weights_bool", "weights_int8_3"], -) -def test_MaskedLinear_conn(shape, x, weights, expected_dtype): - f = MaskedLinear(shape, weights) - y = f(x) - expected = x @ weights.copy().astype(np.int32) - - assert f.conn_dtype == expected_dtype - assert f.connectivity.dtype == expected_dtype - assert y.shape == (shape[1],) - assert y.dtype == np.int32 - assert np.array_equal(y, expected) - assert f.connectivity.shape == shape +from paibox.exceptions import AutoOptimizationWarning +from paibox.synapses.transforms import * +from paibox.synapses.transforms import Transform +from paibox.utils import shape2num + + +class TestTransforms: + @pytest.mark.parametrize( + "weight, expected_dtype", + [ + (np.array([1, 2, 3], dtype=np.int8), np.int8), + (np.array([1, 0, 1], dtype=np.bool_), np.bool_), + (np.array([True, False]), np.bool_), + (np.array([True, False], dtype=np.int8), 
np.int8), + (10, np.int8), + (1, np.bool_), + (True, np.bool_), + (np.int8(1), np.bool_), # automatically optimizated + (np.uint8(99), np.int8), + (np.array([-128, 1, 127], dtype=np.int8), np.int8), + ([1, 2, 3], np.int8), + ((0, 1, 0, 1), np.int8), + ], + ) + def test_weight_dtype_convert(self, weight, expected_dtype): + tfm = Transform(weight) + assert tfm.weights.dtype == expected_dtype + + @pytest.mark.parametrize( + "weight, expected_dtype", + [ + (np.array([1, 2, 3]), np.int8), + # Only automatically optimized to int8 unless specified as bool + (np.array([True, False], dtype=np.int16), np.int8), + (np.array([1, 0, 1], dtype=np.int16), np.int8), # Same as above + (np.array([-128, 1, 127], dtype=np.int32), np.int8), + (np.array([-8, 4, 7]), np.int8), + ([-100, 0, 100], np.int8), + ], + ) + def test_weight_dtype_convert_warning(self, weight, expected_dtype): + with pytest.warns(AutoOptimizationWarning): + tfm = Transform(weight) + + assert tfm.weights.dtype == expected_dtype + + @pytest.mark.parametrize( + "weight", + [ + (np.array([1.0, 2.1, 3.2])), # float is forbidden + (np.array([1, 2, 3], dtype=np.float32)), + (np.array([111, 222, -333], dtype=np.int16)), # out of range int8 + (999), + (3.14), + ([-100, 200, 0]), + ((1.1, 0.5)), + ], + ) + def test_weight_dtype_convert_illegal(self, weight): + with pytest.raises((TypeError, ValueError)): + tfm = Transform(weight) + + @pytest.mark.parametrize( + "weight", + [ + (np.array([1, 2, 3], dtype=np.int8)), + (np.array([1, 0, 1], dtype=np.bool_)), + (np.array([1, 0, 1], dtype=np.int8)), + (10), + (np.int8(-1)), + (np.array([127, 0, 1], dtype=np.int8)), + (np.array([-128, 1, 127], dtype=np.int8)), + ], + ids=[ + "array_1", + "array_2", + "array_3", + "scalar_pos", + "scalar_neg", + "array_int8_1", + "array_int8_2", + ], + ) + def test_OneToOne_dtype(self, weight): + num = 3 + f = OneToOne(num, weight) + x = np.array([1, 0, 1], dtype=np.bool_) + y = f(x) + expected = x * weight + + assert y.dtype == np.int32 + assert y.shape == (num,) + assert np.array_equal(y, expected) + assert f.connectivity.shape == (num, num) + + def test_OneToOne(self): + weight = np.array([1, 2, 3, 4], dtype=np.int8) + f = OneToOne(4, weight) + assert f.connectivity.shape == (4, 4) + + # The last spike is an array. + x1 = np.array([1, 2, 3, 4], dtype=np.int8) + y = f(x1) + assert y.shape == (4,) + + # The last spike is a scalar. 
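+        # A 0-d input broadcasts against the 4 per-neuron weights, so the
+        # output keeps shape (4,).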
+ x2 = np.array(2, dtype=np.int8) + y = f(x2) + assert y.shape == (4,) + + @pytest.mark.parametrize( + "weight, expected_dtype", + [ + (1, np.bool_), + (-1, np.int8), + (10, np.int8), + (-100, np.int8), + (-128, np.int8), + (127, np.int8), + ], + ids=[ + "scalar_1", + "scalar_-1", + "scalar_10", + "scalar_-100", + "scalar_-128", + "scalar_-127", + ], + ) + def test_AllToAll_weight_scalar(self, weight, expected_dtype): + """Test `AllToAll` when weight is a scalar""" + + num_in, num_out = 10, 20 + x = np.random.randint(2, size=(10,)) + f = AllToAll((num_in, num_out), weight) + y = f(x) + expected = np.full((num_out,), np.sum(x, axis=None), dtype=np.int32) * weight + + assert f.connectivity.dtype == expected_dtype + assert y.dtype == np.int32 + assert y.shape == (num_out,) + assert y.ndim == 1 + assert np.array_equal(y, expected) + assert f.connectivity.shape == (num_in, num_out) + + @pytest.mark.parametrize( + "shape, x, weights, expected_dtype", + [ + ( + (3, 4), + np.random.randint(2, size=(3,), dtype=np.bool_), + np.random.randint(2, size=(3, 4), dtype=np.bool_), + np.bool_, + ), + ( + (10, 20), + np.random.randint(2, size=(10,), dtype=np.bool_), + np.random.randint(127, size=(10, 20), dtype=np.int8), + np.int8, + ), + ( + (20, 10), + np.random.randint(2, size=(20,), dtype=np.bool_), + np.random.randint(2, size=(20, 10), dtype=np.bool_), + np.bool_, + ), + ( + (2, 2), + np.array([1, 1], dtype=np.bool_), + np.array([[1, 2], [3, 4]], dtype=np.int8), + np.int8, + ), + ( + (2, 2), + np.array([1, 1], dtype=np.bool_), + np.array([[127, 0], [3, -128]], dtype=np.int8), + np.int8, + ), + ], + ids=[ + "weights_bool_1", + "weights_int8_1", + "weights_int8_2", + "weights_int8_3", + "weights_int8_4", + ], + ) + def test_AllToAll_array(self, shape, x, weights, expected_dtype): + """Test `AllToAll` when weights is an array""" + + f = AllToAll(shape, weights) + y = f(x) + expected = x @ weights.copy().astype(np.int32) + + assert f.connectivity.dtype == expected_dtype + assert np.array_equal(y, expected) + assert f.connectivity.shape == shape + + @pytest.mark.parametrize( + "shape, x, weights, expected_dtype", + [ + ( + (3, 4), + np.array([1, 1, 1], dtype=np.bool_), + np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.int8), + np.int8, + ), + ( + (10, 20), + np.random.randint(2, size=(10,), dtype=np.bool_), + np.random.randint(-10, 10, size=(10, 20), dtype=np.int8), + np.int8, + ), + ( + (20, 10), + np.ones((20,), dtype=np.bool_), + np.random.randint(2, size=(20, 10), dtype=np.bool_), + np.bool_, + ), + ( + (2, 2), + np.array([1, 1], dtype=np.bool_), + np.array([[127, 0], [3, -128]], dtype=np.int8), + np.int8, + ), + ], + ids=["weights_int8_1", "weights_int8_2", "weights_bool", "weights_int8_3"], + ) + def test_MaskedLinear_conn(self, shape, x, weights, expected_dtype): + f = MaskedLinear(shape, weights) + y = f(x) + expected = x @ weights.copy().astype(np.int32) + + assert f.connectivity.dtype == expected_dtype + assert y.shape == (shape[1],) + assert y.dtype == np.int32 + assert np.array_equal(y, expected) + assert f.connectivity.shape == shape + + @staticmethod + def _conv1d_golden( + x: np.ndarray, + out_shape: Tuple[int], + kernel: np.ndarray, + stride: Tuple[int], + padding: Tuple[int], + fm_order: str, + ): + cout, cin, kl = kernel.shape + + if fm_order == "LC": + _x = x.T + else: + _x = x.copy() + + xcin, il = _x.shape + + assert cin == xcin + + ol = (il - kl + 2 * padding[0]) // stride[0] + 1 + + assert ol == out_shape[0] + + out = np.zeros((cout,) + out_shape, dtype=np.int64) + + 
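+        # Golden reference: pad the input, then for every (out_channel, in_channel)
+        # pair slide the kernel along the length axis and accumulate the windowed
+        # products into `out`. Padding is fixed at 0 in the parametrized cases
+        # below, so the pad is effectively a no-op.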
x_padded = np.pad(_x, (0, padding[0]), mode="constant") + + for o in range(cout): + for i in range(cin): + conv_result = np.zeros((ol,), dtype=np.int64) + for l in range(ol): + window = x_padded[i, l * stride[0] : l * stride[0] + kl] + conv_result[l] = np.sum(window * kernel[o, i, :]) + + out[o] += conv_result + + if fm_order == "LC": + return out.T + else: + return out + + @pytest.mark.parametrize( + "in_shape, in_channels, out_channels, kernel_size, stride, padding, fm_order, kdtype", + # Padding is fixed at (0, 0) + [ + ((28,), 16, 8, (3,), (1,), (0,), "CL", np.bool_), + ((28,), 24, 12, (3,), (2,), (0,), "CL", np.bool_), + ((28,), 24, 12, (5,), (2,), (0,), "CL", np.bool_), + ((16,), 8, 16, (3,), (2,), (0,), "CL", np.bool_), + ((28,), 16, 8, (3,), (1,), (0,), "CL", np.int8), + ((28,), 24, 12, (3,), (2,), (0,), "CL", np.int8), + ((28,), 24, 12, (5,), (2,), (0,), "CL", np.int8), + ((16,), 8, 16, (3,), (2,), (0,), "CL", np.int8), + # ((28,), 16, 8, (3,), (1,), (0,), "LC"), + # ((24,), 8, 8, (3,), (2,), (0,), "LC"), + # ((24,), 8, 16, (7,), (2,), (0,), "LC"), + # ((32,), 4, 12, (5,), (1,), (0,), "LC"), + ], + ) + def test_Conv1dForward( + self, + in_shape, + in_channels, + out_channels, + kernel_size, + stride, + padding, + fm_order, + kdtype, + ): + if kdtype == np.bool_: + kernel = np.random.randint( + 0, 2, size=(out_channels, in_channels) + kernel_size, dtype=np.bool_ + ) + else: + kernel = np.random.randint( + np.iinfo(kdtype).min, + np.iinfo(kdtype).max, + size=(out_channels, in_channels) + kernel_size, + dtype=kdtype, + ) + + out_shape = ((in_shape[0] + 2 * padding[0] - kernel_size[0]) // stride[0] + 1,) + + f = Conv1dForward(in_shape, out_shape, kernel, stride, padding) + + if fm_order == "CL": + fm_shape = (in_channels,) + in_shape + else: + fm_shape = in_shape + (in_channels,) + + x = np.random.randint(0, 2, size=fm_shape, dtype=np.bool_) + xf = x.ravel() + + # The result of __call__ using traditional conv + y1 = f(xf) + # The result of matmul using the unrolled matrix + y2 = xf @ f.connectivity.astype(np.int32) + + expected = self._conv1d_golden(x, out_shape, kernel, stride, padding, fm_order) + + assert np.array_equal(y1, expected) + assert np.array_equal(y2, expected.ravel()) + assert f.connectivity.shape == ( + shape2num((kernel.shape[1],) + in_shape), + shape2num((kernel.shape[0],) + out_shape), + ) + + @staticmethod + def _conv2d_golden( + x: np.ndarray, + out_shape: Tuple[int, int], + kernel: np.ndarray, + stride: Tuple[int, int], + padding: Tuple[int, int], + fm_order: str, + ): + cout, cin, kh, kw = kernel.shape + + if fm_order == "HWC": + _x = x.transpose(2, 0, 1) + else: + _x = x + + xcin, ih, iw = _x.shape + + assert cin == xcin + + oh = (ih - kh + 2 * padding[0]) // stride[0] + 1 + ow = (iw - kw + 2 * padding[1]) // stride[1] + 1 + + assert oh, ow == out_shape + + out = np.zeros((cout,) + out_shape, dtype=np.int64) + + x_padded = np.pad( + _x, + ((0, 0), (padding[0], padding[0]), (padding[1], padding[1])), + mode="constant", + ) + + for o in range(cout): + for i in range(cin): + conv_result = np.zeros((oh, ow), dtype=np.int64) + for h in range(oh): + for w in range(ow): + window = x_padded[ + i, + h * stride[0] : h * stride[0] + kh, + w * stride[1] : w * stride[1] + kw, + ] + conv_result[h, w] = np.sum(window * kernel[o, i, :, :]) + + out[o] += conv_result + + if fm_order == "HWC": + return out.transpose(1, 2, 0) + else: + return out + + @pytest.mark.parametrize( + "in_shape, in_channels, out_channels, kernel_size, stride, padding, fm_order, kdtype", + # Padding is 
fixed at (0, 0) + [ + ((28, 28), 16, 8, (3, 3), (1, 1), (0, 0), "CHW", np.bool_), + ((28, 28), 24, 12, (3, 3), (2, 2), (0, 0), "CHW", np.bool_), + ((28, 28), 16, 8, (3, 3), (1, 1), (0, 0), "CHW", np.bool_), + ((28, 28), 24, 12, (3, 3), (2, 2), (0, 0), "CHW", np.int8), + ((28, 28), 24, 12, (5, 5), (2, 1), (0, 0), "CHW", np.int8), + ((16, 16), 8, 16, (3, 3), (2, 2), (0, 0), "CHW", np.int8), + # ((28, 28), 16, 8, (3, 3), (1, 1), (0, 0), "HWC", np.bool_), + # ((24, 32), 8, 8, (3, 4), (2, 1), (0, 0), "HWC", np.bool_), + # ((24, 24), 8, 16, (7, 7), (2, 2), (0, 0), "HWC", np.bool_), + # ((32, 16), 4, 12, (5, 7), (1, 2), (0, 0), "HWC", np.int8), + # ((24, 24), 8, 16, (7, 7), (2, 2), (0, 0), "HWC", np.int8), + # ((32, 16), 4, 12, (5, 7), (1, 2), (0, 0), "HWC", np.int8), + ], + ) + def test_Conv2dForward( + self, + in_shape, + in_channels, + out_channels, + kernel_size, + stride, + padding, + fm_order, + kdtype, + ): + if kdtype == np.bool_: + kernel = np.random.randint( + 0, 2, size=(out_channels, in_channels) + kernel_size, dtype=np.bool_ + ) + else: + kernel = np.random.randint( + np.iinfo(kdtype).min, + np.iinfo(kdtype).max, + size=(out_channels, in_channels) + kernel_size, + dtype=kdtype, + ) + + out_shape = ( + (in_shape[0] + 2 * padding[0] - kernel_size[0]) // stride[0] + 1, + (in_shape[1] + 2 * padding[1] - kernel_size[1]) // stride[1] + 1, + ) + + f = Conv2dForward(in_shape, out_shape, kernel, stride, padding) + + if fm_order == "CHW": + fm_shape = (in_channels,) + in_shape + else: + fm_shape = in_shape + (in_channels,) + + x = np.random.randint(0, 2, size=fm_shape, dtype=np.bool_) + xf = x.ravel() + + # The result of __call__ using traditional conv + y1 = f(xf) + # The result of matmul using the unrolled matrix + y2 = xf @ f.connectivity.astype(np.int32) + + expected = self._conv2d_golden(x, out_shape, kernel, stride, padding, fm_order) + + assert np.array_equal(y1, expected) + assert np.array_equal(y2, expected.ravel()) + assert f.connectivity.shape == ( + shape2num((kernel.shape[1],) + in_shape), + shape2num((kernel.shape[0],) + out_shape), + ) diff --git a/tests/test_network.py b/tests/test_network.py index 009d9771..c7d646da 100644 --- a/tests/test_network.py +++ b/tests/test_network.py @@ -7,41 +7,7 @@ from paibox.node import NodeDict -class Nested_Net_level_1(pb.DynSysGroup): - """Level 1 nested network: pre_n -> syn -> post_n""" - - def __init__(self): - super().__init__() - - self.pre_n = pb.LIF((10,), 10) - self.post_n = pb.LIF((10,), 10) - - w = np.random.randint(-128, 127, (10, 10), dtype=np.int8) - self.syn = pb.NoDecay( - self.pre_n, self.post_n, conn_type=pb.synapses.ConnType.All2All, weights=w - ) - - class TestNetwork_Components_Discover: - def test_flatten_hzynet(self, build_MoreInput_Net): - net = build_MoreInput_Net - - nodes1 = net.nodes(method="relative", level=1, include_self=True) - assert nodes1[""] == net - assert len(nodes1) == 8 - - # 2. Relative + include_self == False - nodes2 = net.nodes(method="relative", level=1, include_self=False) - assert len(nodes2) == 7 - - # 3. Absolute + include_self == True - nodes3 = net.nodes(method="absolute", level=1, include_self=True) - assert len(nodes3) == 8 - - # 4. 
Absolute + include_self == False - nodes4 = net.nodes(method="absolute", level=1, include_self=False) - assert len(nodes4) == 7 - def test_flatten_nodes(self, build_NotNested_Net): net = build_NotNested_Net @@ -70,7 +36,7 @@ def test_flatten_nodes(self, build_NotNested_Net): ) assert len(nodes4) == 3 - def test_nested_net_level_1(self, build_Network_with_container): + def test_nested_net_L1(self, build_Network_with_container): net = build_Network_with_container # 1. Relative + include_self == True @@ -103,28 +69,9 @@ def test_nested_net_level_1(self, build_Network_with_container): sim.run(10) sim.reset() - def test_nested_net_level_2(self): - class Nested_Net_level_2(pb.DynSysGroup): - """Level 2 nested network: inp1 -> s1 -> Nested_Net_level_1 -> s2 -> Nested_Net_level_1""" - - def __init__(self): - self.inp1 = pb.InputProj(1, shape_out=(10,)) - subnet1 = Nested_Net_level_1() - subnet2 = Nested_Net_level_1() - self.s1 = pb.NoDecay( - self.inp1, - subnet1.pre_n, - conn_type=pb.synapses.ConnType.One2One, - ) - self.s2 = pb.NoDecay( - subnet1.post_n, - subnet2.pre_n, - conn_type=pb.synapses.ConnType.One2One, - ) - - super().__init__(subnet1, subnet2) - - net = Nested_Net_level_2() + def test_nested_net_L2(self, build_Nested_Net_L2): + net: pb.Network = build_Nested_Net_L2 + nodes = net.nodes(level=1, include_self=False).subset(DynamicSys).unique() nodes_excluded = ( net.nodes(level=1, include_self=False) @@ -145,71 +92,48 @@ def __init__(self): .not_subset(pb.DynSysGroup) ) + from .conftest import Nested_Net_L1 + + assert isinstance(net[f"{Nested_Net_L1.__name__}_0"], pb.Network) + assert isinstance(net["Named_SubNet_L1_1"], pb.Network) + assert len(nodes) == 5 assert len(nodes_excluded) == 3 assert len(nodes2) == 3 + 3 * 2 assert len(nodes9) == len(nodes2) - def test_nested_net_level_3(self): - class Nested_Net_level_2(pb.DynSysGroup): - """Level 2 nested network: -> s1 -> Nested_Net_level_1""" - - def __init__(self, n: pb.neuron.Neuron): - subnet = Nested_Net_level_1() - self.s1 = pb.NoDecay( - n, - subnet.pre_n, - conn_type=pb.synapses.ConnType.One2One, - ) - - super().__init__(subnet) + del Nested_Net_L1 - class Nested_Net_level_3(pb.DynSysGroup): - """Level 3 nested network: inp1 -> s1 -> n1 -> Nested_Net_level_2 -> s1 -> Nested_Net_level_1""" + def test_nested_net_L2_find_nodes_recursively(self, build_Nested_Net_L2): + net: pb.Network = build_Nested_Net_L2 - def __init__(self): - self.inp1 = pb.InputProj(1, shape_out=(10,)) - self.n1 = pb.LIF((10,), 10) - - net_level2 = Nested_Net_level_2(self.n1) - self.s1 = pb.NoDecay( - self.inp1, - self.n1, - conn_type=pb.synapses.ConnType.One2One, - ) - - super().__init__(net_level2) - - net = Nested_Net_level_3() - nodes_excluded = ( - net.nodes(level=1, include_self=False) - .subset(DynamicSys) - .unique() - .not_subset(pb.DynSysGroup) - ) - nodes2 = ( - net.nodes(level=2, include_self=False) + nodes = ( + net.nodes(level=-1, include_self=False, find_recursive=True) .subset(DynamicSys) .unique() .not_subset(pb.DynSysGroup) ) - nodes3 = ( - net.nodes(level=3, include_self=False) + + assert len(nodes) == 3 + 3 * 2 + + def test_nested_net_L3_find_nodes_recursively(self, build_Nested_Net_L3): + net: pb.Network = build_Nested_Net_L3 + + nodes = ( + net.nodes(level=-1, include_self=False, find_recursive=True) .subset(DynamicSys) .unique() .not_subset(pb.DynSysGroup) ) - assert len(nodes_excluded) == 3 - assert len(nodes2) == 3 + 1 - assert len(nodes3) == 3 + 1 + 3 + assert len(nodes) == 2 + 3 + 2 * 3 class TestNetwork_Components_Oprations: def 
test_Collector_operations(self): s1 = pb.base.DynamicSys(name="s1") s2 = pb.InputProj(1, shape_out=1, name="s2") - s3 = pb.network.NeuDyn(name="s3") + s3 = pb.base.NeuDyn(name="s3") s4 = pb.DynSysGroup(s1, s2, name="s4") g1 = pb.DynSysGroup(s1, s2, s3, name="g1") @@ -235,17 +159,14 @@ def test_Collector_operations(self): assert len(g4_nodes.unique()) == 2 assert len(g3_nodes.exclude(pb.projection.Projection)) == 1 - assert len(g1_nodes.not_subset(pb.network.NeuDyn)) == 2 - assert len(g1_nodes.include(pb.network.NeuDyn, pb.projection.Projection)) == 2 + assert len(g1_nodes.not_subset(NeuDyn)) == 2 + assert len(g1_nodes.include(NeuDyn, pb.projection.Projection)) == 2 def test_add_components(self, build_NotNested_Net_Exp): net: pb.Network = build_NotNested_Net_Exp n3 = pb.LIF((3,), 10) - s1 = pb.synapses.NoDecay(net.n1, n3, conn_type=pb.synapses.ConnType.All2All) - s2 = pb.synapses.NoDecay(net.n2, n3, conn_type=pb.synapses.ConnType.All2All) - - with pytest.raises(ValueError): - net.diconnect_neudyn_succ(n3) + s1 = pb.FullConn(net.n1, n3, conn_type=pb.SynConnType.All2All) + s2 = pb.FullConn(net.n2, n3, conn_type=pb.SynConnType.All2All) # Add extra components into the network after initialization setattr(net, n3.name, n3) # key is 'LIF_0' @@ -260,29 +181,28 @@ def test_add_components(self, build_NotNested_Net_Exp): net.add_components(s2) assert getattr(net, s2.name, False) - def test_disconnect_neudyn_from(self, build_Network_with_container): + def test_disconnect_neuron_from(self, build_Network_with_container): net: pb.Network = build_Network_with_container # Disconnet the n_list[0] -> s1 -> n_list[1] # Nothing to disconnect so a warning is raised with pytest.warns(PAIBoxWarning): - removed = net.disconnect_neudyn_from( - net.n_list[0], net.n_list[2], remove=False - ) + removed = net.disconnect_neuron_from(net.n_list[0], net.n_list[2]) assert removed == [] nodes = net.nodes(level=1, include_self=False).subset(DynamicSys).unique() assert net.n_list[0].name in nodes # Remove the target synapse - removed = net.disconnect_neudyn_from(net.n_list[0], net.n_list[1], remove=True) + removed = net.disconnect_neuron_from(net.n_list[0], net.n_list[1]) assert len(removed) == 1 assert not getattr(net, "s1", False) - def test_disconnect_neudyn_succ(self, build_multi_inodes_onodes): + @pytest.mark.skip("Not implemented") + def test_disconnect_neuron_succ(self, build_multi_inodes_onodes): net: pb.Network = build_multi_inodes_onodes - removed = net.diconnect_neudyn_succ(net.n1, remove=True) + removed = net.diconnect_neuron_succ(net.n1) assert len(removed) == 2 assert not getattr(net, "s2", False) @@ -290,10 +210,11 @@ def test_disconnect_neudyn_succ(self, build_multi_inodes_onodes): assert getattr(net, "s1", False) assert getattr(net, "s3", False) - def test_disconnect_neudyn_pred(self, build_multi_inodes_onodes): + @pytest.mark.skip("Not implemented") + def test_replace_neuron_pred(self, build_multi_inodes_onodes): net: pb.Network = build_multi_inodes_onodes - removed = net.diconnect_neudyn_pred(net.n1, remove=True) + removed = net.replace_neuron_pred(net.n1) assert len(removed) == 2 assert not getattr(net, "s1", False) @@ -301,25 +222,24 @@ def test_disconnect_neudyn_pred(self, build_multi_inodes_onodes): assert getattr(net, "s2", False) assert getattr(net, "s4", False) - def test_insert_neudyn(self, build_Network_with_container): + def test_insert_between_neuron(self, build_Network_with_container): net: pb.Network = build_Network_with_container # Insert n3 between n_list[0] & n_list[1] n_insert = 
pb.LIF((3,), 10) - s_insert1 = pb.synapses.NoDecay( - net.n_list[0], n_insert, conn_type=pb.synapses.ConnType.All2All + s_insert1 = pb.FullConn( + net.n_list[0], n_insert, conn_type=pb.SynConnType.All2All ) - s_insert2 = pb.synapses.NoDecay( - n_insert, net.n_list[1], conn_type=pb.synapses.ConnType.All2All + s_insert2 = pb.FullConn( + n_insert, net.n_list[1], conn_type=pb.SynConnType.All2All ) # Replace s1 with s_insert1->n_insert->s_insert2 - net.insert_neudyn( + net.insert_between_neuron( net.n_list[0], net.n_list[1], (n_insert, s_insert1, s_insert2), replace=True, - remove=False, ) nodes = net.nodes(level=1, include_self=False).subset(DynamicSys).unique() @@ -328,46 +248,18 @@ def test_insert_neudyn(self, build_Network_with_container): assert s_insert2.name in nodes assert getattr(net, f"{s_insert1.name}", False) - assert getattr(net, "s1", False) # s1 is still in the network - assert net.s1.name in nodes + assert not getattr(net, "s1", False) # s1 ie removed from the network assert getattr(net, f"{s_insert2.name}", False) in list( net.n_list[1].master_nodes.values() ) - @pytest.mark.skip(reason="Not implemented") - def test_Subnets(self, build_Network_with_subnet): - net = build_Network_with_subnet - - # 1. Relative + include_self == True, level 1 - nodes1 = ( - net.nodes(method="absolute", level=1, include_self=False) - .subset(DynamicSys) - .unique() - ) - nodes1_sub = nodes1.subset(NeuDyn) - - # 2. Relative + include_self == True, level 2 - nodes2 = ( - net.nodes(method="absolute", level=2, include_self=False) - .subset(DynamicSys) - .unique() - ) - - nodes3 = ( - net.nodes(method="absolute", level=7, include_self=False) - .subset(DynamicSys) - .unique() - ) - - print() - -@pytest.mark.skip(reason="'Sequential is not used'") +@pytest.mark.skip(reason="'Sequential' is not used") def test_Sequential_build(): - n1 = pb.neuron.TonicSpiking(10, fire_step=3) - n2 = pb.neuron.TonicSpiking(10, fire_step=5) - s1 = pb.synapses.NoDecay(n1, n2, conn_type=pb.synapses.ConnType.All2All) + n1 = pb.TonicSpiking(10, fire_step=3) + n2 = pb.TonicSpiking(10, fire_step=5) + s1 = pb.FullConn(n1, n2, conn_type=pb.SynConnType.All2All) sequential = pb.network.Sequential(n1, s1, n2) assert isinstance(sequential, pb.network.Sequential) @@ -378,24 +270,22 @@ def test_Sequential_build(): class Seq(pb.network.Sequential): def __init__(self): super().__init__() - self.n1 = pb.neuron.TonicSpiking(5, fire_step=3) - self.n2 = pb.neuron.TonicSpiking(5, fire_step=5) - self.s1 = pb.synapses.NoDecay( - self.n1, self.n2, conn_type=pb.synapses.ConnType.All2All - ) + self.n1 = pb.TonicSpiking(5, fire_step=3) + self.n2 = pb.TonicSpiking(5, fire_step=5) + self.s1 = pb.FullConn(self.n1, self.n2, conn_type=pb.SynConnType.All2All) seq = Seq() nodes2 = seq.nodes(method="absolute", level=1, include_self=False) assert len(nodes2) == 3 -@pytest.mark.skip(reason="'Sequential is not used'") +@pytest.mark.skip(reason="'Sequential' is not used") def test_Sequential_getitem(): - n1 = pb.neuron.TonicSpiking(10, fire_step=3, name="n1") - n2 = pb.neuron.TonicSpiking(10, fire_step=5, name="n2") - s1 = pb.synapses.NoDecay(n1, n2, conn_type=pb.synapses.ConnType.All2All) - n3 = pb.neuron.TonicSpiking(10, fire_step=5, name="n3") - s2 = pb.synapses.NoDecay(n2, n3, conn_type=pb.synapses.ConnType.All2All) + n1 = pb.TonicSpiking(10, fire_step=3, name="n1") + n2 = pb.TonicSpiking(10, fire_step=5, name="n2") + s1 = pb.FullConn(n1, n2, conn_type=pb.SynConnType.All2All) + n3 = pb.TonicSpiking(10, fire_step=5, name="n3") + s2 = pb.FullConn(n2, n3, 
conn_type=pb.SynConnType.All2All) sequential = pb.network.Sequential(n1, s1, n2, s2, n3, name="Sequential_2") assert isinstance(sequential.children, NodeDict) diff --git a/tests/test_projection.py b/tests/test_projection.py index 7fc285e7..5311a39d 100644 --- a/tests/test_projection.py +++ b/tests/test_projection.py @@ -101,8 +101,6 @@ def test_passing_args_through_run(self): def fakeout_with_args(t, bias, *args, **kwargs): return np.ones((10, 10), dtype=np.int8) * bias - FRONTEND_ENV.clear_ctx("bias") - inp = pb.InputProj( input=fakeout_with_args, shape_out=(10, 10), keep_shape=False ) @@ -162,14 +160,33 @@ def test_input_PeriodicEncoder(self): sim.run(10) assert len(sim.data[prob]) == 10 + assert sim.data[prob][-1].size == 3 + + @pytest.mark.parametrize("encoding_func", ["linear", "log"]) + def test_input_LatencyEncoder(self, encoding_func): + N = 6 + x = np.random.rand(N) + T = 20 + + le = pb.simulator.LatencyEncoder(T, encoding_func) + inp = pb.InputProj(le, shape_out=(N,), keep_shape=False) + + sim = pb.Simulator(inp) + prob = pb.simulator.Probe(inp, "output") + sim.add_probe(prob) + + inp.input = x + sim.run(T) + assert len(sim.data[prob]) == T + assert sim.data[prob][-1].size == N def test_illegal_input(self): - def fakeout_with_t(t): + def fakeout_with_t(t, **kwargs): return np.ones((10, 10), dtype=np.int8) * t inp1 = pb.InputProj(None, shape_out=(4, 4), keep_shape=True) with pytest.raises(TypeError): - inp1.input = fakeout_with_t + inp1.input = fakeout_with_t # type: ignore sim = pb.Simulator(inp1)
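The projection tests above all follow the same wiring pattern: a callable (or encoder) is handed to `pb.InputProj` at construction, a `Probe` is attached to the simulator, and `sim.data[prob]` collects one record per time step. A minimal standalone sketch of that pattern follows; `ramp_input` is an illustrative name rather than part of the test suite, and the assumption is that the simulator passes the current time step `t` plus extra keyword arguments to the callable, as the test signatures above suggest.

```python
import numpy as np
import paibox as pb

# Hypothetical time-dependent input source: `t` is the current time step;
# extra keyword arguments forwarded by the simulator are ignored.
def ramp_input(t, **kwargs):
    return np.ones((4, 4), dtype=np.int8) * t

# The callable is passed at construction; assigning a callable to `.input`
# afterwards raises TypeError, as exercised by `test_illegal_input` above.
inp = pb.InputProj(input=ramp_input, shape_out=(4, 4), keep_shape=True)

sim = pb.Simulator(inp)
prob = pb.simulator.Probe(inp, "output")  # record the projection's output
sim.add_probe(prob)

sim.run(10)
assert len(sim.data[prob]) == 10  # one record per time step
```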