Skip to content

Latest commit

 

History

History
487 lines (346 loc) · 10.7 KB

File metadata and controls

487 lines (346 loc) · 10.7 KB

API Reference

Python API documentation for the SNN FPGA Accelerator.

Core Classes

SNNAccelerator

Main interface for the accelerator.

from snn_fpga_accelerator import SNNAccelerator

# Simulation mode
accel = SNNAccelerator(simulation_mode=True)

# Hardware mode
accel = SNNAccelerator(bitstream_path='snn_integrated.bit', simulation_mode=False)

Methods

configure_network(config: dict)

config = {
    'num_neurons': 100,
    'threshold': 1000,
    'tau': 0.9,  # or 'leak_rate': 0x2C
    'refractory_period': 5,
    'connections': [
        {'src': [0, 1, 2], 'dst': [10, 11], 'weights': 'random'}
    ]
}
accel.configure_network(config)

infer(spikes, duration=0.1)

Run inference.

output_spikes = accel.infer(input_spikes, duration=0.1)

load_weights(weights: np.ndarray)

Load weight matrix.

weights = np.load('weights.npy')
accel.load_weights(weights)

update_weights(weights: np.ndarray)

Update weights (for learning).

accel.update_weights(new_weights)

reset()

Reset neuron states.

accel.reset()

Spike Encoding

PoissonEncoder

Rate-based encoding.

from snn_fpga_accelerator.spike_encoding import PoissonEncoder

encoder = PoissonEncoder(num_neurons=784, duration=0.1, max_rate=100.0)
spikes = encoder.encode(image_data)

TemporalEncoder

Intensity-to-latency encoding.

from snn_fpga_accelerator.spike_encoding import TemporalEncoder

encoder = TemporalEncoder(num_neurons=784, duration=0.1)
spikes = encoder.encode(image_data)

PhaseEncoder

Phase-based encoding.

from snn_fpga_accelerator.spike_encoding import PhaseEncoder

encoder = PhaseEncoder(num_neurons=784, duration=0.1, num_phases=8)
spikes = encoder.encode(image_data)

Learning Algorithms

STDPLearning

Spike-timing-dependent plasticity.

from snn_fpga_accelerator.learning import STDPLearning

stdp = STDPLearning(
    tau_pre=20.0,
    tau_post=20.0,
    a_plus=0.01,
    a_minus=0.01,
    w_min=0.0,
    w_max=1.0
)

# Update weights
new_weights = stdp.update(pre_spikes, post_spikes)

RSTDPLearning

Reward-modulated STDP.

from snn_fpga_accelerator.learning import RSTDPLearning

rstdp = RSTDPLearning(
    tau_pre=20.0,
    tau_post=20.0,
    a_plus=0.01,
    a_minus=0.01,
    w_min=0.0,
    w_max=1.0,
    reward_scale=1.0
)

# Update with reward
new_weights = rstdp.update(pre_spikes, post_spikes, reward)

PyTorch Integration

Model Conversion

Convert PyTorch model to SNN.

import torch
import torch.nn as nn
from snn_fpga_accelerator.pytorch import convert_to_snn

# Define ANN
class ANN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 10)
        
    def forward(self, x):
        x = torch.relu(self.fc1(x))
        return self.fc2(x)

# Convert to SNN
ann = ANN()
snn = convert_to_snn(ann, neuron_params={'threshold': 1000, 'tau': 0.9})

Training with PyTorch

import torch
import torch.nn as nn
import snntorch as snn

# Define SNN
class MySNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 128)
        self.lif1 = snn.Leaky(beta=0.9)
        self.fc2 = nn.Linear(128, 10)
        self.lif2 = snn.Leaky(beta=0.9)
    
    def forward(self, x):
        mem1 = self.lif1.init_leaky()
        mem2 = self.lif2.init_leaky()
        
        spk1, mem1 = self.lif1(self.fc1(x), mem1)
        spk2, mem2 = self.lif2(self.fc2(spk1), mem2)
        return spk2, mem2

# Train
model = MySNN()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.CrossEntropyLoss()

for epoch in range(num_epochs):
    for data, target in dataloader:
        optimizer.zero_grad()
        output, _ = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

# Deploy to FPGA
from snn_fpga_accelerator.pytorch import export_weights
weights = export_weights(model)
accel.load_weights(weights)

Utilities

tau_to_leak_rate(tau: float) -> int

Convert tau to hardware leak_rate encoding.

from snn_fpga_accelerator.utils import tau_to_leak_rate

leak_rate = tau_to_leak_rate(0.9)  # Returns 0x2C (shift1=4, shift2=5)

leak_rate_to_tau(leak_rate: int) -> float

Convert hardware leak_rate to tau.

from snn_fpga_accelerator.utils import leak_rate_to_tau

tau = leak_rate_to_tau(0x2C)  # Returns ~0.906

visualize_spikes(spikes, duration=0.1)

Plot spike raster.

from snn_fpga_accelerator.utils import visualize_spikes
import matplotlib.pyplot as plt

visualize_spikes(spikes, duration=0.1)
plt.show()

spike_rate(spikes, duration=0.1)

Calculate spike rates.

from snn_fpga_accelerator.utils import spike_rate

rates = spike_rate(spikes, duration=0.1)

Hardware-Accurate Simulator

Bit-accurate simulation matching RTL/HLS behavior.

HWAccurateLIFNeuron

from snn_fpga_accelerator.hw_accurate_simulator import HWAccurateLIFNeuron

neuron = HWAccurateLIFNeuron(
    threshold=1000,
    leak_rate=0x2C,  # shift1=4, shift2=5
    refractory_period=5,
    reset_potential=0
)

# Simulate
for weight in input_weights:
    v_mem, spike = neuron.step(weight)

HWAccurateSTDPEngine

from snn_fpga_accelerator.hw_accurate_simulator import HWAccurateSTDPEngine

stdp = HWAccurateSTDPEngine(
    a_plus=4,
    a_minus=3,
    w_min=51,
    w_max=204,
    mu=16  # Q4.4 fixed-point (16 = 1.0)
)

# Update weight
delta_w = stdp.update_weight(pre_spike_time, post_spike_time, current_weight)

verify_lif_neuron()

Run LIF neuron verification tests.

from snn_fpga_accelerator.hw_accurate_simulator import verify_lif_neuron

verify_lif_neuron()  # Runs 3 tests, prints results

verify_stdp_engine()

Run STDP engine verification tests.

from snn_fpga_accelerator.hw_accurate_simulator import verify_stdp_engine

verify_stdp_engine()  # Runs 5 tests, prints results

HWAccurateSNNSimulator

Complete hardware-accurate SNN simulator matching the Core Group RTL architecture.

from snn_fpga_accelerator.hw_accurate_simulator import (
    HWAccurateSNNSimulator, LIFNeuronParams, STDPConfig
)

# Create simulator (16 groups × 128 neurons = 2,048 total)
sim = HWAccurateSNNSimulator(
    num_groups=16,
    neurons_per_group=128,
    neuron_params=LIFNeuronParams(threshold=1000, leak_rate=0x2C),
    clock_period_ns=10  # 100 MHz
)

# Set inter-group connections
sim.ct.add_entry(src_group=0, src_neuron=5, dst_group=1, dst_neuron=10,
                 weight=7, exc_inh=True)

# Inject spikes and step
sim.inject_external_spike(global_neuron_id=5)
output_spikes = sim.step_cycle()

# Reset
sim.reset()

HWCoreGroup

Bit-accurate simulator for a single core group (128 LIF neurons).

from snn_fpga_accelerator.hw_accurate_simulator import HWCoreGroup, LIFNeuronParams

group = HWCoreGroup(
    group_id=0,
    neurons_per_group=128,
    params=LIFNeuronParams(threshold=1000, leak_rate=0x2C)
)

# Load intra-group weights
group.set_weight(src=5, dst=10, weight=7, exc=True)  # 8-bit weight + exc flag

# Inject spike and process
group.inject_spike(neuron_id=5, weight=7, exc=True)
fired_neurons = group.process_spikes()
group.process_leak()

HWConnectivityTable

Sparse inter-group connection storage (matches synaptic_connectivity_table.v).

from snn_fpga_accelerator.hw_accurate_simulator import HWConnectivityTable

ct = HWConnectivityTable(num_groups=16, neurons_per_group=128)

# Add connection (max 16 fanout entries per source neuron)
ct.add_entry(src_group=0, src_neuron=5, dst_group=1, dst_neuron=10,
             weight=7, exc_inh=True)

# Lookup destinations for a spike
destinations = ct.lookup(src_group=0, src_neuron=5)
# Returns list of (dst_group, dst_neuron, weight, exc_inh)

HWEventRouter

Central spike routing hub (matches event_router_ng.v).

from snn_fpga_accelerator.hw_accurate_simulator import HWEventRouter

router = HWEventRouter(num_groups=16, connectivity_table=ct)

# Route a spike through the connectivity table
delivered = router.route_spike(src_group=0, src_neuron=5, groups=sim.groups)

Configuration

Neuron Parameters

| Parameter         | Type  | Range    | Default | Description                                |
|-------------------|-------|----------|---------|--------------------------------------------|
| threshold         | int   | 1-65535  | 1000    | Spike threshold                            |
| tau               | float | 0.1-0.99 | 0.9     | Decay constant                             |
| leak_rate         | int   | 0-255    | -       | Raw hardware encoding (alternative to tau) |
| refractory_period | int   | 0-255    | 5       | Cycles after spike                         |
| reset_potential   | int   | 0-65535  | 0       | Post-spike reset                           |

Hardware Limits:

  • HLS Learning Engine: MAX_NEURONS = 720, MAX_SYNAPSES = 518,400 (720×720). Neuron IDs are 10-bit (neuron_id_t, range 0–719).
  • RTL Core Groups: 2,048 neurons (16 groups × 128). Neuron IDs are 11-bit (range 0–2047). Global ID = {group_id[3:0], local_id[6:0]}.

STDP Parameters

| Parameter | Type  | Range | Default | Description           |
|-----------|-------|-------|---------|-----------------------|
| tau_pre   | float | 1-100 | 20.0    | Pre-trace decay (ms)  |
| tau_post  | float | 1-100 | 20.0    | Post-trace decay (ms) |
| a_plus    | float | 0-1   | 0.01    | LTP magnitude         |
| a_minus   | float | 0-1   | 0.01    | LTD magnitude         |
| w_min     | float | 0-1   | 0.0     | Min weight            |
| w_max     | float | 0-1   | 1.0     | Max weight            |

Spike Format

Spikes are represented as a list of (neuron_id, timestamp) tuples. Valid neuron ID ranges depend on the target (see Hardware Limits above): 0–719 for the HLS learning engine (10-bit IDs), 0–2047 for the RTL core groups (11-bit IDs); software-only simulation may use higher IDs:

spikes = [
    (0, 0.001),     # Neuron 0 fires at t=1ms
    (5, 0.003),     # Neuron 5 fires at t=3ms
    (719, 0.010),   # Neuron 719 fires at t=10ms
]

Or as numpy array (neuron_id, timestamp, value):

spikes = np.array([
    [0, 0.001, 1.0],
    [5, 0.003, 1.0],
    [0, 0.010, 1.0]
])

Examples

Complete examples in examples/ directory:

  • examples/pytorch/mnist_training_example.py - MNIST classification
  • examples/pytorch/r_stdp_learning_example.py - R-STDP learning
  • examples/pytorch/mozafari_rstdp_faithful.py - 3-layer deep network
  • examples/complete_integration_example.py - Full workflow

Error Handling

Common exceptions:

from snn_fpga_accelerator.exceptions import (
    SNNConfigError,
    SNNBitstreamError,
    SNNHardwareError
)

try:
    accel = SNNAccelerator(bitstream_path='nonexistent.bit')
except SNNBitstreamError as e:
    print(f"Bitstream error: {e}")

try:
    accel.configure_network({'invalid': 'config'})
except SNNConfigError as e:
    print(f"Config error: {e}")

References