
Miscellaneous utilities

utils

check_possible_values(param_name, params, valid_values)

Check if the provided parameters are all valid values.

Parameters:

    param_name (str): The name of the parameter (for error messages). Required.
    params (Iterable): The parameters to check. Required.
    valid_values (Iterable): The valid values to check against. Required.

Raises:

    ValueError: If any of the parameters are not one of the valid values.

Source code in src/bioplnn/utils/common.py
def check_possible_values(
    param_name: str,
    params: Union[Iterable, NDArray[Any], torch.Tensor],
    valid_values: Union[Iterable, NDArray[Any], torch.Tensor],
) -> None:
    """Check if the provided parameters are all valid values.

    Args:
        param_name (str): The name of the parameter (for error message).
        params (Iterable): The parameters to check.
        valid_values (Iterable): The valid values to check against.

    Raises:
        ValueError: If any of the parameters are not one of the valid values.
    """
    if isinstance(params, torch.Tensor):
        params = set(torch.unique(params).tolist())
    elif isinstance(params, np.ndarray):
        params = set(np.unique(params).tolist())
    else:
        params = set(params)

    if isinstance(valid_values, torch.Tensor):
        valid_values = set(torch.unique(valid_values).tolist())
    elif isinstance(valid_values, np.ndarray):
        valid_values = set(np.unique(valid_values).tolist())
    else:
        valid_values = set(valid_values)

    if not params <= valid_values:
        raise ValueError(f"{param_name} must be one of {valid_values}.")
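
Example (a minimal usage sketch; it assumes the function is importable from bioplnn.utils):

    import torch

    from bioplnn.utils import check_possible_values

    check_possible_values("cell_type", ["E", "I"], valid_values=["E", "I", "X"])  # passes

    labels = torch.tensor([0, 1, 3])
    check_possible_values("labels", labels, valid_values=torch.arange(3))
    # ValueError: labels must be one of {0, 1, 2}.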

count_parameters(model)

Count the number of parameters in a model. Sparse parameters contribute their number of non-zero elements.

Parameters:

    model: PyTorch model. Required.

Returns:

    int: Number of parameters.

Source code in src/bioplnn/utils/torch.py
def count_parameters(model):
    """Count the number of trainable parameters in a model.

    Args:
        model: PyTorch model.

    Returns:
        int: Number of trainable parameters.
    """
    total_params = 0
    for param in model.parameters():
        num_params = (
            param._nnz()
            if param.layout
            in (torch.sparse_coo, torch.sparse_csr, torch.sparse_csc)
            else param.numel()
        )
        total_params += num_params
    return total_params
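
Example (a minimal sketch; the model is illustrative):

    import torch.nn as nn

    from bioplnn.utils import count_parameters

    model = nn.Linear(10, 5)  # 50 weights + 5 biases
    print(count_parameters(model))  # 55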

create_neuron_typed_connectome(num_neurons, neuron_type_probs, neuron_type_connectivity, deterministic_type_assignment=False)

Initialize a synthetic connectome as a sparse adjacency matrix.

Parameters:

    num_neurons (int): Total number of neurons. Required.
    neuron_type_probs (np.ndarray): Proportion of each neuron type, summing to 1. Required.
    neuron_type_connectivity (np.ndarray): Neuron-type to neuron-type connectivity probabilities. Required.
    deterministic_type_assignment (bool, optional): If True, assigns neuron types deterministically, with neuron_type_probs interpreted as exact counts rather than probabilities. Defaults to False.

Returns:

    tuple[torch.Tensor, np.ndarray]: Sparse adjacency matrix of neuron-neuron connections and the assigned neuron types.

Source code in src/bioplnn/utils/torch.py
def create_neuron_typed_connectome(
    num_neurons: int,
    neuron_type_probs: np.ndarray,
    neuron_type_connectivity: np.ndarray,
    deterministic_type_assignment: bool = False,
) -> tuple[torch.Tensor, np.ndarray]:
    """Initialize a synthetic connectome as a sparse adjacency matrix.

    Args:
        num_neurons (int): Total number of neurons.
        neuron_type_probs (np.ndarray): Proportion of each neuron type, summing to 1.
        neuron_type_connectivity (np.ndarray): neuron-type to neuron-type connectivity probabilities.
        deterministic_type_assignment (bool, optional): If True, assigns neurons deterministically based on neuron_type_probs
            interpreted as exact counts rather than probabilities. Defaults to False.

    Returns:
        tuple[torch.Tensor, np.ndarray]: Sparse adjacency matrix of
            neuron-neuron connections and the assigned neuron types.
    """

    # Determine number of neuron types
    num_neuron_types = len(neuron_type_probs)

    # Assign neuron types to neurons
    if deterministic_type_assignment:
        neuron_counts = (np.array(neuron_type_probs) * num_neurons).astype(int)
        neuron_types = np.concatenate(
            [
                np.full(count, i, dtype=int)
                for i, count in enumerate(neuron_counts)
            ]
        )
        np.random.shuffle(neuron_types)  # Shuffle to avoid ordering bias
    else:
        neuron_types = np.random.choice(
            num_neuron_types, size=num_neurons, p=neuron_type_probs
        )

    # Generate all possible neuron-neuron pairs
    row_indices, col_indices = np.meshgrid(
        np.arange(num_neurons), np.arange(num_neurons), indexing="ij"
    )
    row_indices = row_indices.flatten()
    col_indices = col_indices.flatten()

    # Get corresponding neuron-type pairs
    src_types = neuron_types[row_indices]
    tgt_types = neuron_types[col_indices]

    # Get connection probabilities from the connectivity matrix
    probs = neuron_type_connectivity[src_types, tgt_types]

    # Sample connections
    mask = np.random.rand(len(probs)) < probs
    row_indices = row_indices[mask]
    col_indices = col_indices[mask]

    # Create sparse adjacency matrix
    indices = torch.tensor([row_indices, col_indices], dtype=torch.long)
    values = torch.ones(len(row_indices), dtype=torch.float)

    sparse_adj = torch.sparse_coo_tensor(
        indices,
        values,
        (num_neurons, num_neurons),
        check_invariants=True,
    ).coalesce()

    return sparse_adj, neuron_types
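
Example (a minimal sketch with two neuron types, 80% type 0 and 20% type 1; all numbers are illustrative):

    import numpy as np

    from bioplnn.utils import create_neuron_typed_connectome

    adj, types = create_neuron_typed_connectome(
        num_neurons=100,
        neuron_type_probs=np.array([0.8, 0.2]),
        neuron_type_connectivity=np.array([[0.05, 0.10], [0.20, 0.05]]),
        deterministic_type_assignment=True,
    )
    print(adj.shape, types.shape)  # torch.Size([100, 100]) (100,)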

create_sparse_projection(size, num_neurons, indices=None, mode='ih')

Create identity connectivity for input-to-hidden or hidden-to-output connections.

Parameters:

    size (int): Size of the input or output. Required.
    num_neurons (int): Number of neurons in the hidden layer. Required.
    indices (Union[torch.Tensor, PathLike], optional): Indices of neurons that receive input. If None, all neurons receive input from corresponding input indices. Defaults to None.
    mode (Literal["ih", "ho"], optional): Whether to create input-to-hidden or hidden-to-output connectivity (only changes the orientation of the connectivity matrix). Defaults to "ih".

Returns:

    torch.Tensor: Sparse connectivity matrix in COO format.

Raises:

    ValueError: If indices are invalid.

Source code in src/bioplnn/utils/torch.py
def create_sparse_projection(
    size: int,
    num_neurons: int,
    indices: Optional[Union[torch.Tensor, PathLikeType]] = None,
    mode: Literal["ih", "ho"] = "ih",
) -> torch.Tensor:
    """Create identity connectivity for input-to-hidden or hidden-to-output
    connections.

    Args:
        size (int): Size of the input or output.
        num_neurons (int): Number of neurons in the hidden layer.
        indices (Union[torch.Tensor, PathLike], optional): Indices of
            neurons that receive input. If None, all neurons receive input from
            corresponding input indices. Defaults to None.
        mode (Literal["ih", "ho"], optional): Whether to create input-to-hidden
            or hidden-to-output connectivity (only changes the orientation of
            the connectivity matrix). Defaults to "ih".

    Returns:
        torch.Tensor: Sparse connectivity matrix in COO format.

    Raises:
        ValueError: If indices are invalid.
    """

    # Generate identity connectivity for input-to-hidden connections
    if indices is None:
        indices = torch.arange(size)
    elif not isinstance(indices, torch.Tensor):
        # The signature accepts a path; load the indices from file via
        # load_tensor (defined in this module).
        indices = load_tensor(indices).long()

    if mode == "ih":
        indices = torch.stack((indices, torch.arange(size)))  # type: ignore
        shape = (num_neurons, size)
    else:
        indices = torch.stack((torch.arange(size), indices))  # type: ignore
        shape = (size, num_neurons)

    values = torch.ones(indices.shape[1])

    connectivity = torch.sparse_coo_tensor(
        indices,
        values,
        shape,
        check_invariants=True,
    ).coalesce()

    return connectivity
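
Example (a minimal sketch: a 4-dimensional input projected onto neurons 2, 5, 7, and 9 of a 10-neuron hidden layer):

    import torch

    from bioplnn.utils import create_sparse_projection

    proj = create_sparse_projection(
        size=4, num_neurons=10, indices=torch.tensor([2, 5, 7, 9]), mode="ih"
    )
    print(proj.shape)          # torch.Size([10, 4])
    print(proj.to_dense()[2])  # tensor([1., 0., 0., 0.])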

create_sparse_topographic_connectome(sheet_size, synapse_std, synapses_per_neuron, self_recurrence)

Create random topographic hidden-to-hidden connectivity.

Parameters:

    sheet_size (tuple[int, int]): Size of the sheet-like neural layer (rows, columns). Required.
    synapse_std (float): Standard deviation of the Gaussian distribution for sampling synapse connections. Required.
    synapses_per_neuron (int): Number of incoming synapses per neuron. Required.
    self_recurrence (bool): Whether neurons can connect to themselves. Required.

Returns:

    torch.Tensor: Sparse connectivity matrix in COO format.

Source code in src/bioplnn/utils/torch.py
def create_sparse_topographic_connectome(
    sheet_size: tuple[int, int],
    synapse_std: float,
    synapses_per_neuron: int,
    self_recurrence: bool,
) -> torch.Tensor:
    """Create random topographic hidden-to-hidden connectivity.

    Args:
        sheet_size (tuple[int, int]): Size of the sheet-like neural layer (rows,
            columns).
        synapse_std (float): Standard deviation of the Gaussian distribution for
            sampling synapse connections.
        synapses_per_neuron (int): Number of incoming synapses per neuron.
        self_recurrence (bool): Whether neurons can connect to themselves.

    Returns:
        torch.Tensor: Sparse connectivity matrix in COO format.
    """
    # Generate random connectivity for hidden-to-hidden connections
    num_neurons = sheet_size[0] * sheet_size[1]

    idx_1d = torch.arange(num_neurons)
    idx = idx_1D_to_2D_tensor(idx_1d, sheet_size[0], sheet_size[1]).t()

    synapses = (
        torch.randn(num_neurons, 2, synapses_per_neuron) * synapse_std
        + idx.unsqueeze(-1)
    ).long()

    if self_recurrence:
        synapses = torch.cat([synapses, idx.unsqueeze(-1)], dim=2)

    synapses = synapses.clamp(
        torch.zeros(2).view(1, 2, 1),
        torch.tensor((sheet_size[0] - 1, sheet_size[1] - 1)).view(1, 2, 1),
    )
    synapses = idx_2D_to_1D_tensor(
        synapses.transpose(0, 1).flatten(1), sheet_size[0], sheet_size[1]
    ).view(num_neurons, -1)

    synapse_root = idx_1d.unsqueeze(-1).expand(-1, synapses.shape[1])

    indices_hh = torch.stack((synapses, synapse_root)).flatten(1)

    # He initialization of values (synapses_per_neuron is the fan_in)
    values_hh = torch.randn(indices_hh.shape[1]) * math.sqrt(
        2 / synapses_per_neuron
    )

    connectivity_hh = torch.sparse_coo_tensor(
        indices_hh,
        values_hh,
        (num_neurons, num_neurons),
        check_invariants=True,
    ).coalesce()

    return connectivity_hh
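
Example (a minimal sketch; parameter values are illustrative):

    from bioplnn.utils import create_sparse_topographic_connectome

    conn = create_sparse_topographic_connectome(
        sheet_size=(16, 16),
        synapse_std=2.0,
        synapses_per_neuron=8,
        self_recurrence=True,
    )
    print(conn.shape)  # torch.Size([256, 256])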

dict_flatten(d, delimiter='.', key=None)

Flattens a nested dictionary into a single-level dictionary.

Keys of the flattened dictionary will be the path to the value, with path components joined by delimiter.

Parameters:

    d (dict): Dictionary to flatten. Required.
    delimiter (str, optional): String to join key path components. Defaults to ".".
    key (str, optional): Current key prefix. Defaults to None.

Returns:

    dict: Flattened dictionary.

Raises:

    ValueError: If flattening would result in duplicate keys.

Source code in src/bioplnn/utils/common.py
def dict_flatten(d, delimiter=".", key=None):
    """Flattens a nested dictionary into a single-level dictionary.

    Keys of the flattened dictionary will be the path to the value, with path
    components joined by delimiter.

    Args:
        d (dict): Dictionary to flatten.
        delimiter (str, optional): String to join key path components.
            Defaults to ".".
        key (str, optional): Current key prefix. Defaults to None.

    Returns:
        dict: Flattened dictionary.

    Raises:
        ValueError: If flattening would result in duplicate keys.
    """
    key = f"{key}{delimiter}" if key is not None else ""
    non_dicts = {
        f"{key}{k}": v for k, v in d.items() if not isinstance(v, dict)
    }
    dicts = {
        f"{key}{k}": v
        for _k, _v in d.items()
        if isinstance(_v, dict)
        for k, v in dict_flatten(_v, delimiter=delimiter, key=_k).items()
    }

    if in_both := dicts.keys() & non_dicts.keys():
        if len(in_both) > 1:
            raise ValueError(
                f"flattened keys {list(in_both)} used more than once in dict"
            )
        else:
            raise ValueError(
                f"flattened key {list(in_both)[0]} used more than once in dict"
            )

    return {**non_dicts, **dicts}
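
Example (a minimal sketch):

    from bioplnn.utils import dict_flatten

    cfg = {"model": {"lr": 1e-3, "optim": {"name": "AdamW"}}, "seed": 0}
    print(dict_flatten(cfg))
    # {'seed': 0, 'model.lr': 0.001, 'model.optim.name': 'AdamW'}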

expand_array_2d(x, m, n, depth=0)

Expands a value to a 2D numpy array of shape (m, n).

Use depth > 0 if the intended type T can be indexed recursively, where depth is the maximum number of times x can be recursively indexed if of type T. For example, if x is a shallow list, then depth = 1. If x is a list of lists or an array or tensor, then depth = 2.

Parameters:

    x (Any): The variable to expand. Required.
    m (int): The number of rows in the expanded array. Required.
    n (int): The number of columns in the expanded array. Required.
    depth (int, optional): The depth x can be recursively indexed. A depth of -1 will assume x is of type list[T] and check if x is already of the correct shape. Defaults to 0.

Returns:

    np.ndarray: Expanded 2D numpy array.

Source code in src/bioplnn/utils/common.py
def expand_array_2d(
    x: Optional[ScalarOrArray2dType[T]], m: int, n: int, depth: int = 0
) -> NDArray[Any]:
    """Expands a value to a 2D numpy array of shape (m, n).

    Use depth > 0 if the intended type T can be indexed recursively, where
    depth is the maximum number of times x can be recursively indexed if of
    type T. For example, if x is a shallow list, then depth = 1. If x is a
    list of lists or an array or tensor, then depth = 2.

    Args:
        x (Any): The variable to expand.
        m (int): The number of rows in the expanded array.
        n (int): The number of columns in the expanded array.
        depth (int, optional): The depth x can be recursively indexed. A depth
            of -1 will assume x is of type list[T] and check if x is already of
            the correct shape.

    Returns:
        np.ndarray: Expanded 2D numpy array.
    """

    if m < 1 or n < 1:
        raise ValueError("m and n must be at least 1.")

    inner = x
    try:
        for _ in range(depth + 2):
            if isinstance(inner, str):
                # Treat strings as scalars, mirroring expand_list.
                raise TypeError
            inner = inner[0]  # type: ignore
    except (IndexError, TypeError):
        array = np.empty((m, n), dtype=object)
        for i in range(m):
            for j in range(n):
                array[i, j] = x
        return array

    if x is None:
        assert depth == -1
        raise ValueError("x cannot be None if depth is -1.")

    x = np.array(x, dtype=object)

    if x.shape != (m, n):
        raise ValueError(f"x must have shape ({m}, {n}).")

    return x
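
Example (a minimal sketch: a scalar is broadcast to every cell, while an input that already has shape (m, n) is passed through):

    from bioplnn.utils import expand_array_2d

    print(expand_array_2d(0.5, 2, 3))
    # [[0.5 0.5 0.5]
    #  [0.5 0.5 0.5]]

    print(expand_array_2d([[1, 2], [3, 4]], 2, 2).shape)  # (2, 2)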

expand_list(x, n, depth=0)

Expands a value to a list of length n.

If x is already a list, then the list is returned unchanged.

If x is not a list, then x is expanded to a list of length n.

Use depth > 0 if the intended type T can be indexed recursively, where depth is the maximum number of times x can be recursively indexed if of type T. For example, if x is a shallow list, then depth = 1. If T is a list of lists or an array or tensor, then depth = 2.

Parameters:

    x (Any): The variable to expand. Required.
    n (int): The length of the expanded list. Required.
    depth (int, optional): The depth x can be recursively indexed. A depth of -1 will assume x is of type list[T] and check if x is already of the correct length. Defaults to 0.

Returns:

    list[Any]: Expanded list.

Source code in src/bioplnn/utils/common.py
def expand_list(
    x: Optional[ScalarOrListLike[T]], n: int, depth: int = 0
) -> Union[list[T], NDArray[Any]]:
    """Expands a value to a list of length n.

    If x is already a list, then the list is returned unchanged.

    If x is not a list, then x is expanded to a list of length n.

    Use depth > 0 if the intended type T can be indexed recursively, where
    depth is the maximum number of times x can be recursively indexed if of
    type T. For example, if x is a shallow list, then depth = 1. If T is a
    list of lists or an array or tensor, then depth = 2.

    Args:
        x (Any): The variable to expand.
        n (int): The length of the expanded list.
        depth (int, optional): The depth x can be recursively indexed. A depth
            of -1 will assume x is of type list[T] and check if x is already of
            the correct length. Defaults to 0.

    Returns:
        list[Any]: Expanded list.
    """

    if n < 1:
        raise ValueError("n must be at least 1.")

    inner = x
    try:
        for _ in range(depth + 1):
            if isinstance(inner, str):
                raise TypeError
            inner = inner[0]  # type: ignore
    except (IndexError, TypeError):
        return [x] * n  # type: ignore

    if x is None:
        assert depth == -1
        raise ValueError("x cannot be None if depth is -1.")

    if len(x) != n:  # type: ignore
        raise ValueError(f"x must have length {n}.")

    return x  # type: ignore
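
Example (a minimal sketch):

    from bioplnn.utils import expand_list

    print(expand_list("relu", 3))        # ['relu', 'relu', 'relu']
    print(expand_list([16, 32, 64], 3))  # [16, 32, 64] (returned unchanged)
    expand_list([16, 32], 3)             # ValueError: x must have length 3.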

get_activation(activation)

Get an initialized activation function module.

Parameters:

    activation (Union[str, None, nn.Module]): The name(s) of the activation function(s) or an already initialized nn.Module. If the latter, the module is returned as is. If None, returns nn.Identity(). Required.

Returns:

    nn.Module: The initialized activation function.

Source code in src/bioplnn/utils/torch.py
def get_activation(
    activation: Union[str, None, nn.Module],
) -> nn.Module:
    """Get an initialized activation function module.

    Args:
        activation (Union[str, None, nn.Module]): The name(s) of the
            activation function(s) or an already initialized nn.Module. If the
            latter, the module is returned as is. If None, returns
            nn.Identity().

    Returns:
        nn.Module: The initialized activation function.
    """
    if isinstance(activation, nn.Module):
        return activation
    activation_classes = get_activation_class(activation)
    if isinstance(activation_classes, list):
        return nn.Sequential(*[act() for act in activation_classes])
    else:
        return activation_classes()
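
Example (a minimal sketch; which name strings are recognized is determined by the internal _get_single_activation_class helper, so "relu" below is an assumption):

    import torch.nn as nn

    from bioplnn.utils import get_activation

    print(get_activation(None))       # Identity()
    print(get_activation(nn.ReLU()))  # ReLU(), returned unchanged
    print(get_activation("relu"))     # ReLU(), assuming "relu" is recognized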

get_activation_class(activation)

Get one or more activation function classes.

If activation is a string with commas, split and get each activation.

Parameters:

    activation (Union[str, None]): The name(s) of the activation function(s), comma-separated for multiple. If None, returns nn.Identity. Required.

Returns:

    Union[Type[nn.Module], list[Type[nn.Module]]]: A single activation class or a list of activation classes if comma-separated.

Source code in src/bioplnn/utils/torch.py
def get_activation_class(
    activation: Union[str, None],
) -> Union[Type[nn.Module], list[Type[nn.Module]]]:
    """Get one or more activation function classes.

    If activation is a string with commas, split and get each activation.

    Args:
        activation (Union[str, None]): The name(s) of the activation
            function(s), comma-separated for multiple. If None, returns
            nn.Identity.

    Returns:
        Union[Type[nn.Module], List[Type[nn.Module]]]: A single activation class
            or a list of activation classes if comma-separated.
    """
    if activation is None:
        return nn.Identity
    activations = [act.strip() for act in activation.split(",")]
    if len(activations) == 1:
        return _get_single_activation_class(activations[0])
    else:
        return [_get_single_activation_class(act) for act in activations]

idx_1D_to_2D_tensor(x, m, n)

Convert 1D indices to 2D coordinates.

Parameters:

    x (torch.Tensor): 1D indices tensor. Required.
    m (int): Number of rows in the 2D grid. Required.
    n (int): Number of columns in the 2D grid. Required.

Returns:

    torch.Tensor: 2D coordinates tensor of shape (2, len(x)).

Source code in src/bioplnn/utils/torch.py
def idx_1D_to_2D_tensor(x: torch.Tensor, m: int, n: int) -> torch.Tensor:
    """Convert 1D indices to 2D coordinates.

    Args:
        x (torch.Tensor): 1D indices tensor.
        m (int): Number of rows in the 2D grid.
        n (int): Number of columns in the 2D grid.

    Returns:
        torch.Tensor: 2D coordinates tensor of shape (2, len(x)).
    """
    return torch.stack((x // n, x % n))

idx_2D_to_1D_tensor(x, m, n)

Convert 2D coordinates to 1D indices.

Parameters:

    x (torch.Tensor): 2D coordinates tensor of shape (2, N). Required.
    m (int): Number of rows in the 2D grid. Required.
    n (int): Number of columns in the 2D grid. Required.

Returns:

    torch.Tensor: 1D indices tensor.

Source code in src/bioplnn/utils/torch.py
def idx_2D_to_1D_tensor(x: torch.Tensor, m: int, n: int) -> torch.Tensor:
    """Convert 2D coordinates to 1D indices.

    Args:
        x (torch.Tensor): 2D coordinates tensor of shape (2, N).
        m (int): Number of rows in the 2D grid.
        n (int): Number of columns in the 2D grid.

    Returns:
        torch.Tensor: 1D indices tensor.
    """
    return x[0] * n + x[1]
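
Example (a minimal round-trip sketch on a 3x4 grid, i.e. m=3 rows and n=4 columns):

    import torch

    from bioplnn.utils import idx_1D_to_2D_tensor, idx_2D_to_1D_tensor

    flat = torch.tensor([0, 5, 11])
    coords = idx_1D_to_2D_tensor(flat, 3, 4)  # tensor([[0, 1, 2], [0, 1, 3]])
    print(idx_2D_to_1D_tensor(coords, 3, 4))  # tensor([ 0,  5, 11])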

init_tensor(init_fn, *args, **kwargs)

Initialize a tensor with a specified initialization function.

Parameters:

    init_fn (Union[str, TensorInitFnType]): The initialization function name ("zeros", "ones", "randn", or "rand") or a callable. Required.
    *args: Arguments to pass to the initialization function (usually the shape).
    **kwargs: Keyword arguments to pass to the initialization function.

Returns:

    torch.Tensor: The initialized tensor.

Raises:

    ValueError: If the initialization function is not supported.

Source code in src/bioplnn/utils/torch.py
def init_tensor(
    init_fn: Union[str, TensorInitFnType], *args, **kwargs
) -> torch.Tensor:
    """Initialize a tensor with a specified initialization function.

    Args:
        init_fn (Union[str, TensorInitFnType]): The initialization function name
            or callable.
        *args: Arguments to pass to the initialization function (usually shape).
        **kwargs: Keyword arguments to pass to the initialization function.

    Returns:
        torch.Tensor: The initialized tensor.

    Raises:
        ValueError: If the initialization function is not supported.
    """

    if isinstance(init_fn, str):
        if init_fn == "zeros":
            return torch.zeros(*args, **kwargs)
        elif init_fn == "ones":
            return torch.ones(*args, **kwargs)
        elif init_fn == "randn":
            return torch.randn(*args, **kwargs)
        elif init_fn == "rand":
            return torch.rand(*args, **kwargs)
        else:
            raise ValueError(
                "Invalid initialization function string. Must be 'zeros', "
                "'ones', 'randn', or 'rand'."
            )

    try:
        return init_fn(*args, **kwargs)
    except TypeError:
        # The callable may not accept a `device` kwarg; retry without it and
        # move the result to the requested device afterwards.
        if "device" in kwargs:
            device = kwargs.pop("device")
            return init_fn(*args, **kwargs).to(device)
        raise
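
Example (a minimal sketch: initialization by name and by callable):

    import torch

    from bioplnn.utils import init_tensor

    a = init_tensor("zeros", 3, 4)            # equivalent to torch.zeros(3, 4)
    b = init_tensor(torch.full, (2, 2), 7.0)  # any tensor-returning callable
    print(a.shape, b.shape)  # torch.Size([3, 4]) torch.Size([2, 2])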

initialize_criterion(*, class_name, **kwargs)

Initialize a loss criterion.

Parameters:

    class_name (str): The name of the criterion class to use (looked up in torch.nn). Required.
    **kwargs: Additional keyword arguments to pass to the criterion.

Returns:

    torch.nn.Module: The initialized criterion.

Source code in src/bioplnn/utils/initializers.py
def initialize_criterion(*, class_name: str, **kwargs) -> torch.nn.Module:
    """Initialize a loss criterion.

    Args:
        class_name (str): The name of the criterion class to use.
        **kwargs: Additional keyword arguments to pass to the criterion.

    Returns:
        torch.nn.Module: The initialized criterion.
    """
    return getattr(torch.nn, class_name)(**kwargs)

initialize_dataloader(*, dataset, seed=None, **kwargs)

Initialize a dataloader for a given dataset.

Parameters:

    dataset (str): The dataset name; resolved to the corresponding get_{dataset}_dataloaders factory. Required.
    seed (int, optional): The seed to use for the dataloader. Defaults to None.
    **kwargs: Additional keyword arguments to pass to the dataloader.

Returns:

    tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]: The train and validation dataloaders.

Source code in src/bioplnn/utils/initializers.py
def initialize_dataloader(
    *, dataset: str, seed: Optional[int] = None, **kwargs
) -> tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:
    """Initialize a dataloader for a given dataset.

    Args:
        dataset (str): The dataset name; resolved to the corresponding
            get_{dataset}_dataloaders factory.
        seed (int, optional): The seed to use for the dataloader. Defaults to None.
        **kwargs: Additional keyword arguments to pass to the dataloader.

    Returns:
        tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:
            The train and validation dataloaders.
    """

    return getattr(dataloaders, f"get_{dataset}_dataloaders")(
        **kwargs, seed=seed
    )

initialize_model(*, class_name, **kwargs)

Initialize a model based on the class name.

Parameters:

    class_name (str): The name of the model class to use (looked up in bioplnn.models). Required.
    **kwargs: Additional keyword arguments to pass to the model.

Returns:

    nn.Module: The initialized model.

Source code in src/bioplnn/utils/initializers.py
def initialize_model(*, class_name: str, **kwargs) -> nn.Module:
    """Initialize a model based on the class name.

    Args:
        class_name (str): The name of the model class to use.
        **kwargs: Additional keyword arguments to pass to the model.

    Returns:
        nn.Module: The initialized model.
    """
    import bioplnn.models

    return getattr(bioplnn.models, class_name)(**kwargs)

initialize_optimizer(*, class_name, model_parameters, **kwargs)

Initialize an optimizer for model training.

Parameters:

    class_name (str): The name of the optimizer class to use (looked up in torch.optim). Required.
    model_parameters (Iterator[nn.Parameter]): The model parameters to optimize. Required.
    **kwargs: Additional keyword arguments to pass to the optimizer.

Returns:

    torch.optim.Optimizer: The initialized optimizer.

Source code in src/bioplnn/utils/initializers.py
def initialize_optimizer(
    *, class_name: str, model_parameters: Iterator[nn.Parameter], **kwargs
) -> torch.optim.Optimizer:
    """Initialize an optimizer for model training.

    Args:
        class_name (str): The name of the optimizer class to use.
        model_parameters (Iterator[nn.Parameter]): The model parameters to
            optimize.
        **kwargs: Additional keyword arguments to pass to the optimizer.

    Returns:
        torch.optim.Optimizer: The initialized optimizer.
    """
    return getattr(torch.optim, class_name)(model_parameters, **kwargs)

initialize_scheduler(*, class_name, optimizer, **kwargs)

Initialize a learning rate scheduler.

Parameters:

    class_name (str): The name of the scheduler class to use (looked up in torch.optim.lr_scheduler). Required.
    optimizer (torch.optim.Optimizer): The optimizer to schedule. Required.
    **kwargs: Additional keyword arguments to pass to the scheduler.

Returns:

    torch.optim.lr_scheduler.LRScheduler: The initialized scheduler.

Source code in src/bioplnn/utils/initializers.py
def initialize_scheduler(
    *, class_name: str, optimizer: torch.optim.Optimizer, **kwargs
) -> torch.optim.lr_scheduler.LRScheduler:
    """Initialize a learning rate scheduler.

    Args:
        class_name (str): The name of the scheduler class to use.
        optimizer (torch.optim.Optimizer): The optimizer to schedule.
        **kwargs: Additional keyword arguments to pass to the scheduler.

    Returns:
        torch.optim.lr_scheduler.LRScheduler: The initialized scheduler.
    """
    return getattr(torch.optim.lr_scheduler, class_name)(optimizer, **kwargs)
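
Example (a minimal sketch chaining the criterion, optimizer, and scheduler initializers; class names are standard PyTorch classes, the hyperparameters are illustrative, and the functions are assumed to be re-exported from bioplnn.utils, otherwise import them from bioplnn.utils.initializers):

    import torch.nn as nn

    from bioplnn.utils import (
        initialize_criterion,
        initialize_optimizer,
        initialize_scheduler,
    )

    model = nn.Linear(10, 2)
    criterion = initialize_criterion(class_name="CrossEntropyLoss")
    optimizer = initialize_optimizer(
        class_name="AdamW", model_parameters=model.parameters(), lr=1e-3
    )
    scheduler = initialize_scheduler(
        class_name="StepLR", optimizer=optimizer, step_size=10, gamma=0.1
    )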

is_list_like(x)

Determines if an object is list-like (iterable and indexable, but not a string or mapping).

Parameters:

    x (Any): Object to check. Required.

Returns:

    bool: True if the object is list-like, False otherwise.

Source code in src/bioplnn/utils/common.py
def is_list_like(x: Any) -> bool:
    """Determines if an object is list-like (iterable but not a string or mapping).

    Args:
        x (Any): Object to check.

    Returns:
        bool: True if the object is list-like, False otherwise.
    """
    if isinstance(x, (str, Mapping)):
        return False
    try:
        iter(x)
        if len(x) > 0:
            x[0]
    except Exception:
        return False
    return True
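
Example (a minimal sketch):

    import numpy as np

    from bioplnn.utils import is_list_like

    print(is_list_like([1, 2, 3]))     # True
    print(is_list_like(np.arange(3)))  # True
    print(is_list_like("abc"))         # False (strings are excluded)
    print(is_list_like({"a": 1}))      # False (mappings are excluded)
    print(is_list_like(iter([1])))     # False (iterators are not indexable)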

load_array(array)

Load a numpy array from an array, iterable, or file.

Supported file formats:

- npz
- npy
- csv
- pt

Parameters:

    array (Union[np.ndarray, torch.Tensor, pd.DataFrame, Iterable[Any], PathLike]): numpy array, iterable, or path to a file containing a numpy array. Required.

Returns:

    np.ndarray: The original or loaded numpy array.

Raises:

    ValueError: If the array cannot be loaded from the given file or iterable.

Source code in src/bioplnn/utils/torch.py
def load_array(
    array: Union[
        np.ndarray, torch.Tensor, pd.DataFrame, Iterable[Any], PathLikeType
    ],
) -> np.ndarray:
    """Load a numpy array from an array, iterable, or file.

    Supported file formats:
    - npz
    - npy
    - csv
    - pt

    Args:
        array: numpy array, iterable, or path to file containing numpy array.

    Returns:
        The original or loaded numpy array.

    Raises:
        ValueError: If the array cannot be loaded from the given file or iterable.
    """

    if isinstance(array, PathLikeType):
        return _load_array_from_file(array)
    return _load_array_from_iterable(array)

load_sparse_tensor(x)

Load a torch tensor from an array, iterable, or file and convert it to a sparse tensor.

Source code in src/bioplnn/utils/torch.py
def load_sparse_tensor(
    x: Union[
        torch.Tensor, pd.DataFrame, np.ndarray, Iterable[Any], PathLikeType
    ],
) -> torch.Tensor:
    """Load a torch tensor from an array, iterable, or file."""

    if isinstance(x, PathLikeType):
        x = _load_tensor_from_file(x)
    else:
        x = _load_tensor_from_iterable(x)

    x = x.to_sparse()

    if x._nnz() > SPARSE_TENSOR_WARNING_THRESHOLD * x.numel():
        warnings.warn(
            f"loaded a sparse tensor with more than "
            f"{SPARSE_TENSOR_WARNING_THRESHOLD:.0%}% non-zero elements. "
            "This is likely undesirable. Ensure your input is sufficiently "
            "sparse (whether explicitly or implicitly) to leverage the "
            "benefits of sparse tensors."
        )

    return x

load_tensor(tensor)

Load a torch tensor from an array, iterable, or file.

Source code in src/bioplnn/utils/torch.py
def load_tensor(
    tensor: Union[
        torch.Tensor, pd.DataFrame, np.ndarray, Iterable[Any], PathLikeType
    ],
) -> torch.Tensor:
    """Load a torch tensor from an array, iterable, or file."""

    if isinstance(tensor, PathLikeType):
        return _load_tensor_from_file(tensor)
    return _load_tensor_from_iterable(tensor)

manual_seed(seed)

Set random seeds for reproducibility.

Parameters:

    seed (int): The random seed to use. Required.

Source code in src/bioplnn/utils/torch.py
def manual_seed(seed: int):
    """Set random seeds for reproducibility.

    Args:
        seed (int): The random seed to use.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)

manual_seed_deterministic(seed)

Set random seeds and configure PyTorch for deterministic execution.

Parameters:

    seed (int): The random seed to use. Required.

Source code in src/bioplnn/utils/torch.py
def manual_seed_deterministic(seed: int):
    """Set random seeds and configure PyTorch for deterministic execution.

    Args:
        seed (int): The random seed to use.
    """
    manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(True)
    os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")

pass_fn(*args, **kwargs)

A no-op function that accepts any arguments and does nothing.

Parameters:

    *args: Any positional arguments.
    **kwargs: Any keyword arguments.

Source code in src/bioplnn/utils/common.py
def pass_fn(*args, **kwargs):
    """A no-op function that accepts any arguments and does nothing.

    Args:
        *args: Any positional arguments.
        **kwargs: Any keyword arguments.
    """
    pass

print_cuda_mem_stats(device=None)

Print CUDA memory statistics for debugging.

Source code in src/bioplnn/utils/torch.py
def print_cuda_mem_stats(device: Optional[torch.device] = None):
    """Print CUDA memory statistics for debugging."""
    f, t = torch.cuda.mem_get_info(device)
    print(f"Free/Total: {f / (1024**3):.2f}GB/{t / (1024**3):.2f}GB")

profile_fn(fn, sort_by='cuda_time_total', row_limit=50, profile_kwargs={}, fn_kwargs={})

Profile a function with PyTorch's profiler.

Parameters:

    fn: Function to profile. Required.
    sort_by (str, optional): Column to sort results by. Defaults to "cuda_time_total".
    row_limit (int, optional): Maximum number of rows to display. Defaults to 50.
    profile_kwargs (dict, optional): Keyword arguments to pass to the profiler. Defaults to {}.
    fn_kwargs (dict, optional): Keyword arguments to pass to the function. Defaults to {}.

Source code in src/bioplnn/utils/torch.py
def profile_fn(
    fn,
    sort_by="cuda_time_total",
    row_limit=50,
    profile_kwargs={},
    fn_kwargs={},
):
    """Profile a function with PyTorch's profiler.

    Args:
        fn: Function to profile.
        sort_by (str, optional): Column to sort results by. Defaults to
            "cuda_time_total".
        row_limit (int, optional): Maximum number of rows to display.
            Defaults to 50.
        **fn_kwargs: Keyword arguments to pass to the function.
    """
    with profile(
        activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
        **profile_kwargs,
    ) as prof:
        fn(**fn_kwargs)
    print(prof.key_averages().table(sort_by=sort_by, row_limit=row_limit))
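
Example (a minimal sketch; CUDA rows appear only when a GPU is available, so results are sorted by CPU time here):

    import torch

    from bioplnn.utils import profile_fn

    def step(n=256):
        a = torch.randn(n, n)
        return a @ a

    profile_fn(step, sort_by="cpu_time_total", fn_kwargs={"n": 512})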

without_keys(d, keys)

Creates a new dictionary without specified keys.

Parameters:

    d (Mapping): Input dictionary. Required.
    keys (list[str]): List of keys to exclude. Required.

Returns:

    dict: A new dictionary without the specified keys.

Source code in src/bioplnn/utils/common.py
def without_keys(d: Mapping, keys: list[str]) -> dict:
    """Creates a new dictionary without specified keys.

    Args:
        d (Mapping): Input dictionary.
        keys (list[str]): List of keys to exclude.

    Returns:
        dict: A new dictionary without the specified keys.
    """
    return {x: d[x] for x in d if x not in keys}
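
Example (a minimal sketch):

    from bioplnn.utils import without_keys

    cfg = {"lr": 1e-3, "seed": 0, "debug": True}
    print(without_keys(cfg, ["debug"]))  # {'lr': 0.001, 'seed': 0}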