
nn

Module for neural network architectures implemented in PathPyG.

DBGNN

Bases: torch.nn.Module

Implementation of the time-aware graph neural network DBGNN for time-resolved data on dynamic graphs.

The De Bruijn Graph Neural Network (DBGNN) is a time-aware graph neural network architecture designed for dynamic graphs that captures temporal-topological patterns through causal walks — temporally ordered sequences that represent how nodes influence each other over time. The architecture uses multiple layers of higher-order De Bruijn graphs for message passing, where nodes represent walks of increasing length, enabling the model to learn non-Markovian patterns in the causal topology of dynamic graphs.
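
To make the idea concrete, here is a small, library-agnostic sketch (plain Python, not the pathpyG API) of how a second-order De Bruijn graph arises from causal walks: second-order nodes correspond to the edges observed in the walks, and a second-order edge connects two such nodes whenever the corresponding walk of length two was observed.

# Illustrative only: derive second-order De Bruijn nodes and edges from causal walks.
causal_walks = [("a", "b", "c"), ("b", "c", "d")]

# Second-order nodes are the walks of length one (i.e. the edges) that occur in the data.
second_order_nodes = {(w[i], w[i + 1]) for w in causal_walks for i in range(len(w) - 1)}

# A second-order edge (u, v) -> (v, w) exists iff the causal walk u -> v -> w was observed.
second_order_edges = {
    ((w[i], w[i + 1]), (w[i + 1], w[i + 2])) for w in causal_walks for i in range(len(w) - 2)
}

print(sorted(second_order_nodes))  # [('a', 'b'), ('b', 'c'), ('c', 'd')]
print(sorted(second_order_edges))  # [(('a', 'b'), ('b', 'c')), (('b', 'c'), ('c', 'd'))]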

Reference

Proposed by Qarkaxhija, Perri, and Scholtes at the first Learning on Graphs Conference (LoG) in 2022 [1]. The paper can be found at https://proceedings.mlr.press/v198/qarkaxhija22a/qarkaxhija22a.pdf.


  1. Qarkaxhija, L., Perri, V. & Scholtes, I. De Bruijn Goes Neural: Causality-Aware Graph Neural Networks for Time Series Data on Dynamic Graphs. In LoG, vol. 198, 51 (PMLR, 2022).

Source code in src/pathpyG/nn/dbgnn.py
class DBGNN(Module):
    """Implementation of the time-aware graph neural network DBGNN for time-resolved data on dynamic graph.

    The De Bruijn Graph Neural Network (DBGNN) is a time-aware graph neural network architecture designed for dynamic graphs
    that captures temporal-topological patterns through causal walks — temporally ordered sequences that represent how nodes influence each other over time.
    The architecture uses multiple layers of higher-order De Bruijn graphs for message passing, where nodes represent walks of increasing length,
    enabling the model to learn non-Markovian patterns in the causal topology of dynamic graphs.

    ??? reference
        Proposed by Qarkaxhija, Perri, and Scholtes at the first Learning on Graphs Conference (LoG) in 2022[^1]. The paper can be found [here](https://proceedings.mlr.press/v198/qarkaxhija22a/qarkaxhija22a.pdf).

    [^1]: *Qarkaxhija, L., Perri, V. & Scholtes, I. De Bruijn Goes Neural: Causality-Aware Graph Neural Networks for Time Series Data on Dynamic Graphs. in LoG vol. 198 51 (PMLR, 2022).*
    """

    def __init__(self, num_classes: int, num_features: tuple[int, int], hidden_dims: list[int], p_dropout: float = 0.0):
        """Initialize the DBGNN model.

        Args:
            num_classes: Number of classes for the classification task
            num_features: Number of features for first order and higher order nodes, i.e. `(first_order_num_features, second_order_num_features)`
            hidden_dims: Number of hidden dimensions per layer in both GNN parts (first-order and higher-order)
            p_dropout: Drop-out probability for the dropout layers.
        """
        super().__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_dims = hidden_dims
        self.p_dropout = p_dropout

        # higher-order layers
        self.higher_order_layers = ModuleList()
        self.higher_order_layers.append(GCNConv(self.num_features[1], self.hidden_dims[0]))

        # first-order layers
        self.first_order_layers = ModuleList()
        self.first_order_layers.append(GCNConv(self.num_features[0], self.hidden_dims[0]))

        for dim in range(1, len(self.hidden_dims) - 1):
            # higher-order layers
            self.higher_order_layers.append(GCNConv(self.hidden_dims[dim - 1], self.hidden_dims[dim]))
            # first-order layers
            self.first_order_layers.append(GCNConv(self.hidden_dims[dim - 1], self.hidden_dims[dim]))

        self.bipartite_layer = BipartiteGraphOperator(self.hidden_dims[-2], self.hidden_dims[-1])

        # Linear layer
        self.lin = torch.nn.Linear(self.hidden_dims[-1], num_classes)

    def forward(self, data: Data) -> torch.Tensor:
        """Forward pass of the DBGNN.

        Args:
            data: Input [data][torch_geometric.data.Data] object containing the graph structure and node features.
        """
        x = data.x
        x_h = data.x_h

        # First-order convolutions
        for layer in self.first_order_layers:
            x = F.dropout(x, p=self.p_dropout, training=self.training)
            x = F.elu(layer(x, data.edge_index, data.edge_weights))
        x = F.dropout(x, p=self.p_dropout, training=self.training)

        # Second-order convolutions
        for layer in self.higher_order_layers:
            x_h = F.dropout(x_h, p=self.p_dropout, training=self.training)
            x_h = F.elu(layer(x_h, data.edge_index_higher_order, data.edge_weights_higher_order))
        x_h = F.dropout(x_h, p=self.p_dropout, training=self.training)

        # Bipartite message passing
        x = torch.nn.functional.elu(
            self.bipartite_layer((x_h, x), data.bipartite_edge_index, n_ho=data.num_ho_nodes, n_fo=data.num_nodes)
        )
        x = F.dropout(x, p=self.p_dropout, training=self.training)

        # Linear layer
        x = self.lin(x)

        return x

__init__

Initialize the DBGNN model.

Parameters:

num_classes (int): Number of classes for the classification task. Required.

num_features (tuple[int, int]): Number of features for first-order and higher-order nodes, i.e. (first_order_num_features, second_order_num_features). Required.

hidden_dims (list[int]): Number of hidden dimensions per layer in both GNN parts (first-order and higher-order). Required.

p_dropout (float): Dropout probability for the dropout layers. Default: 0.0.
Source code in src/pathpyG/nn/dbgnn.py
def __init__(self, num_classes: int, num_features: tuple[int, int], hidden_dims: list[int], p_dropout: float = 0.0):
    """Initialize the DBGNN model.

    Args:
        num_classes: Number of classes for the classification task
        num_features: Number of features for first order and higher order nodes, i.e. `(first_order_num_features, second_order_num_features)`
        hidden_dims: Number of hidden dimensions per layer in both GNN parts (first-order and higher-order)
        p_dropout: Drop-out probability for the dropout layers.
    """
    super().__init__()

    self.num_features = num_features
    self.num_classes = num_classes
    self.hidden_dims = hidden_dims
    self.p_dropout = p_dropout

    # higher-order layers
    self.higher_order_layers = ModuleList()
    self.higher_order_layers.append(GCNConv(self.num_features[1], self.hidden_dims[0]))

    # first-order layers
    self.first_order_layers = ModuleList()
    self.first_order_layers.append(GCNConv(self.num_features[0], self.hidden_dims[0]))

    for dim in range(1, len(self.hidden_dims) - 1):
        # higher-order layers
        self.higher_order_layers.append(GCNConv(self.hidden_dims[dim - 1], self.hidden_dims[dim]))
        # first-order layers
        self.first_order_layers.append(GCNConv(self.hidden_dims[dim - 1], self.hidden_dims[dim]))

    self.bipartite_layer = BipartiteGraphOperator(self.hidden_dims[-2], self.hidden_dims[-1])

    # Linear layer
    self.lin = torch.nn.Linear(self.hidden_dims[-1], num_classes)
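
As a usage sketch (the import path is assumed from the source location src/pathpyG/nn/dbgnn.py shown above), the constructor wires the layers as follows: with a hidden_dims list of length two, each GNN part gets a single GCNConv mapping its input features to hidden_dims[0], the bipartite layer maps hidden_dims[-2] to hidden_dims[-1], and the final linear layer maps hidden_dims[-1] to num_classes.

from pathpyG.nn.dbgnn import DBGNN  # assumed import path

# 7 first-order and 3 higher-order input features, one GCNConv per part (16 dims),
# an 8-dimensional bipartite layer, and a binary classification head.
model = DBGNN(num_classes=2, num_features=(7, 3), hidden_dims=[16, 8], p_dropout=0.4)
print(model)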

forward

Forward pass of the DBGNN.

Parameters:

data (torch_geometric.data.Data): Input data object containing the graph structure and node features. Required.
Source code in src/pathpyG/nn/dbgnn.py
def forward(self, data: Data) -> torch.Tensor:
    """Forward pass of the DBGNN.

    Args:
        data: Input [data][torch_geometric.data.Data] object containing the graph structure and node features.
    """
    x = data.x
    x_h = data.x_h

    # First-order convolutions
    for layer in self.first_order_layers:
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = F.elu(layer(x, data.edge_index, data.edge_weights))
    x = F.dropout(x, p=self.p_dropout, training=self.training)

    # Second-order convolutions
    for layer in self.higher_order_layers:
        x_h = F.dropout(x_h, p=self.p_dropout, training=self.training)
        x_h = F.elu(layer(x_h, data.edge_index_higher_order, data.edge_weights_higher_order))
    x_h = F.dropout(x_h, p=self.p_dropout, training=self.training)

    # Bipartite message passing
    x = torch.nn.functional.elu(
        self.bipartite_layer((x_h, x), data.bipartite_edge_index, n_ho=data.num_ho_nodes, n_fo=data.num_nodes)
    )
    x = F.dropout(x, p=self.p_dropout, training=self.training)

    # Linear layer
    x = self.lin(x)

    return x
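
Putting it together, below is a minimal end-to-end sketch. The attribute names and shapes are inferred from the forward pass above; in practice these tensors would be produced by pathpyG's higher-order graph construction, the import path is assumed from the source location, and the orientation of bipartite_edge_index (higher-order node indices in row 0, first-order node indices in row 1) is an assumption based on the (x_h, x) tuple passed to the bipartite layer.

import torch
from torch_geometric.data import Data

from pathpyG.nn.dbgnn import DBGNN  # assumed import path

num_nodes, num_ho_nodes = 4, 3

data = Data(x=torch.rand(num_nodes, 7))                        # first-order node features
data.x_h = torch.rand(num_ho_nodes, 3)                         # higher-order node features
data.edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])         # first-order edges
data.edge_weights = torch.ones(3)
data.edge_index_higher_order = torch.tensor([[0, 1], [1, 2]])  # edges between higher-order nodes
data.edge_weights_higher_order = torch.ones(2)
data.bipartite_edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])  # higher-order -> first-order mapping (assumed orientation)
data.num_ho_nodes = num_ho_nodes

model = DBGNN(num_classes=2, num_features=(7, 3), hidden_dims=[16, 8])
model.eval()
out = model(data)
print(out.shape)  # expected: torch.Size([4, 2]), one row of class scores per first-order node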