BiGCN.py
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from util_functions import dot_sim, use_cuda
'''
Decomposed Graph Prototype Network (DGPN)
--- At the first layer, we decompose a k-hop GCN layer into (k+1) parts.
--- At the second-to-last layer, we use a fully connected layer to map the local and global embeddings to predict the csd_matrix.
'''
device = use_cuda()
class BiGCN(nn.Module):
    def __init__(self, n_in, n_h, dropout):
        super(BiGCN, self).__init__()
        self.fc1 = nn.Linear(n_in, n_h, bias=True)
        # self.fc_local_pred_csd = nn.Linear(n_h, n_h, bias=True)
        # self.fc_final_pred_csd = nn.Linear(n_h, n_h, bias=True)  # used for the last layer
        self.dropout = dropout
        self.act = nn.ReLU()
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight.data)

    def forward(self, X, S_X, S_A):
        # Local term: 1) compute the k-hop GCN with a single layer; 2) the output is used for the local loss.
        features = torch.mm(S_X, X)
        features = self.fc1(features)
        Y_X = torch.mm(features, S_A)
        # Y_X = self.act(features)
        Y_X = F.dropout(Y_X, p=self.dropout, training=self.training)
        return Y_X

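# Shape sketch of the forward pass (an assumption about the intended inputs; these
# sizes are not stated in this file): with X of shape [n, n_in], S_X of shape [n, n]
# multiplying from the left, and S_A of shape [n_h, m] multiplying from the right,
# BiGCN computes
#     Y_X = dropout( fc1(S_X @ X) @ S_A )
# so the output Y_X has shape [n, m].
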
class BiGCN_X(nn.Module):
    def __init__(self, n_in, n_h, dropout):
        super(BiGCN_X, self).__init__()
        self.fc1 = nn.Linear(n_in, 512, bias=True)
        self.fc2 = nn.Linear(512, n_h)
        # self.fc_local_pred_csd = nn.Linear(n_h, n_h, bias=True)
        # self.fc_final_pred_csd = nn.Linear(n_h, n_h, bias=True)  # used for the last layer
        self.dropout = dropout
        self.act = nn.ReLU()
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight.data)

    def forward(self, X, S_X, S_A):
        # Local term: 1) compute the k-hop GCN with a single layer; 2) the output is used for the local loss.
        features = torch.mm(S_X, X)
        features = self.fc1(features)
        features = self.act(features)
        features = self.fc2(features)
        Y_X = torch.mm(features, S_A)
        # Y_X = self.act(features)
        Y_X = F.dropout(Y_X, p=self.dropout, training=self.training)
        return Y_X

    # def forward(self, X, S_X, S_A):
    #     # the local item: 1. get k-hop-gcn by one layer; 2. get local loss
    #     features = self.fc1(X)
    #     features = self.act(features)
    #     features = torch.mm(S_X, features)
    #     features = self.fc2(features)
    #     Y_X = torch.mm(features, S_A)
    #     # Y_X = self.act(features)
    #     Y_X = F.dropout(Y_X, p=self.dropout, training=self.training)
    #     return Y_X

    # def forward(self, X, S_X, S_A):
    #     # the local item: 1. get k-hop-gcn by one layer; 2. get local loss
    #     features = self.fc1(X)
    #     features = self.act(features)
    #     features = self.fc2(features)
    #     features = self.act(features)
    #     features = torch.mm(S_X, features)
    #     Y_X = torch.mm(features, S_A)
    #     # Y_X = self.act(features)
    #     Y_X = F.dropout(Y_X, p=self.dropout, training=self.training)
    #     return Y_X

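# Note on the commented-out BiGCN_X.forward variants above: the active version
# propagates with S_X before fc1/fc2; the first variant propagates with S_X between
# fc1 and fc2; the second applies both linear layers (each followed by ReLU) before
# any propagation. All three end by multiplying with S_A and applying dropout.
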
class BiGCN_A(nn.Module):
    def __init__(self, n_in, n_h, dropout):
        super(BiGCN_A, self).__init__()
        self.fc1 = nn.Linear(n_in, 800, bias=True)
        self.fc2 = nn.Linear(800, n_h, bias=True)
        # self.fc_local_pred_csd = nn.Linear(n_h, n_h, bias=True)
        # self.fc_final_pred_csd = nn.Linear(n_h, n_h, bias=True)  # used for the last layer
        self.dropout = dropout
        self.act = nn.ReLU()
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight.data)

    def forward(self, X, S_X, S_A):
        # Local term: 1) compute the k-hop GCN with a single layer; 2) the output is used for the local loss.
        features = torch.mm(S_X, X)
        features = self.fc1(features)
        features = self.act(features)
        features = self.fc2(features)
        Y_X = torch.mm(features, S_A)
        # Y_X = self.act(features)
        Y_X = F.dropout(Y_X, p=self.dropout, training=self.training)
        return Y_X
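

if __name__ == "__main__":
    # Minimal smoke test: a sketch only, with sizes that are assumptions and not
    # taken from the original repository or its datasets.
    n, n_in, n_h, m = 6, 10, 16, 5
    X = torch.rand(n, n_in)      # assumed input feature matrix
    S_X = torch.rand(n, n)       # assumed propagation matrix applied on the left
    S_A = torch.rand(n_h, m)     # assumed propagation matrix applied on the right
    for Model in (BiGCN, BiGCN_X, BiGCN_A):
        model = Model(n_in, n_h, dropout=0.5)
        Y_X = model(X, S_X, S_A)
        print(Model.__name__, tuple(Y_X.shape))  # expected: (6, 5)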