# Source code for federatedscope.gfl.model.gat

import torch
import torch.nn.functional as F
from torch.nn import ModuleList
from torch_geometric.data import Data
from torch_geometric.nn import GATConv


class GAT_Net(torch.nn.Module):
    r"""GAT model from the "Graph Attention Networks" paper (ICLR'18).

    Stacks ``max_depth`` :class:`GATConv` layers; every layer except the
    last is followed by dropout + ReLU.  Note each ``GATConv`` is built
    with the default ``heads=1``.

    Arguments:
        in_channels (int): dimension of input.
        out_channels (int): dimension of output.
        hidden (int): dimension of hidden units, default=64.
        max_depth (int): layers of GNN, default=2.
        dropout (float): dropout ratio, default=.0.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden=64,
                 max_depth=2,
                 dropout=.0):
        super(GAT_Net, self).__init__()
        self.convs = ModuleList()
        for i in range(max_depth):
            # BUG FIX: test for the final layer *first*.  The original
            # checked ``i == 0`` first, so with max_depth=1 the single
            # layer was built as (in_channels -> hidden) and the model
            # never produced out_channels-sized output.
            if (i + 1) == max_depth:
                # The last layer's input is in_channels when it is also
                # the first layer (max_depth == 1), otherwise hidden.
                in_dim = in_channels if i == 0 else hidden
                self.convs.append(GATConv(in_dim, out_channels))
            elif i == 0:
                self.convs.append(GATConv(in_channels, hidden))
            else:
                self.convs.append(GATConv(hidden, hidden))
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize the parameters of every GATConv layer."""
        for m in self.convs:
            m.reset_parameters()

    def forward(self, data):
        """Run the GNN on a graph.

        Arguments:
            data: either a ``torch_geometric.data.Data`` object (reads
                ``data.x`` and ``data.edge_index``) or a
                ``(x, edge_index)`` tuple.

        Returns:
            Node embeddings of shape ``[num_nodes, out_channels]``
            (no activation is applied after the last layer).

        Raises:
            TypeError: if ``data`` is neither a ``Data`` nor a tuple.
        """
        if isinstance(data, Data):
            x, edge_index = data.x, data.edge_index
        elif isinstance(data, tuple):
            x, edge_index = data
        else:
            raise TypeError('Unsupported data type!')

        for i, conv in enumerate(self.convs):
            x = conv(x, edge_index)
            # No dropout/activation after the final layer.
            if (i + 1) == len(self.convs):
                break
            x = F.relu(F.dropout(x, p=self.dropout, training=self.training))
        return x