
Commit b661ce1
preparing for a release
MartinXPN committed Mar 30, 2021
1 parent ba010d3 commit b661ce1
Showing 3 changed files with 11 additions and 17 deletions.
8 changes: 4 additions & 4 deletions abcde/data.py
@@ -63,8 +63,8 @@ def gen_vertex_pairs(n: int, size: int) -> Tuple[np.array, np.array]:
     @staticmethod
     def generate_graph(n: int, graph_type: str) -> Data:
         # Generate a random graph with NetworkX
-        m = np.random.randint(low=1, high=7)
-        p = 0.01 if m > 4 else 0.06  # np.random.uniform(low=0.02, high=0.08)
+        m = 4     # np.random.randint(low=1, high=7)
+        p = 0.05  # = 0.01 if m > 4 else 0.06  # np.random.uniform(low=0.02, high=0.08)
         if graph_type == 'erdos_renyi':       graph = nx.erdos_renyi_graph(n, p=4 / n)
         elif graph_type == 'small-world':     graph = nx.connected_watts_strogatz_graph(n, k=4, p=0.1)
         elif graph_type == 'barabasi_albert': graph = nx.barabasi_albert_graph(n, m)
@@ -77,11 +77,11 @@ def generate_graph(n: int, graph_type: str) -> Data:
         g.add_vertices(graph.nodes())
         g.add_edges(graph.edges())

-        betweenness = np.expand_dims(g.betweenness(directed=False), -1)
+        betweenness = np.expand_dims(g.betweenness(directed=False), axis=-1)

         degrees = nx.degree_centrality(graph)
         degrees = np.array([degrees[n] for n in range(n)], dtype='float32')
-        degrees = np.expand_dims(degrees, -1)
+        degrees = np.expand_dims(degrees, axis=-1)

         src_ids, tgt_ids = RandomGraphs.gen_vertex_pairs(n=n, size=5 * n)
         edge_index = np.array(graph.to_directed(as_view=True).edges).T
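
Note on the data.py changes: `np.expand_dims(x, axis=-1)` behaves identically to the positional `np.expand_dims(x, -1)`; the keyword only makes the intent explicit. The substantive change pins the generator parameters to m = 4 and p = 0.05 instead of sampling them per graph. A minimal standalone sketch of the generation step under the fixed parameters (the values of n and graph_type are illustrative, and p is presumably consumed by graph types outside this hunk):

import networkx as nx
import numpy as np

n, graph_type = 1000, 'barabasi_albert'
m, p = 4, 0.05  # fixed for the release; previously sampled at random

if graph_type == 'erdos_renyi':
    graph = nx.erdos_renyi_graph(n, p=4 / n)                  # expected degree ~ 4
elif graph_type == 'small-world':
    graph = nx.connected_watts_strogatz_graph(n, k=4, p=0.1)
elif graph_type == 'barabasi_albert':
    graph = nx.barabasi_albert_graph(n, m)                    # m edges per new node

degrees = nx.degree_centrality(graph)                         # dict: node -> centrality
degrees = np.array([degrees[v] for v in range(n)], dtype='float32')
degrees = np.expand_dims(degrees, axis=-1)                    # shape (n,) -> (n, 1)
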
4 changes: 2 additions & 2 deletions predict.py
@@ -88,8 +88,8 @@ def evaluate_all(model_path: Union[str, IO],
     for dataset in ['com-youtube', 'amazon', 'cit-Patents', 'dblp', 'com-lj']:
         print('Evaluating the dataset:', dataset)
         real(model_path=model_path,
-             data_test=real_dir / dataset + '.txt',
-             label_file=real_dir / dataset + '_score.txt')
+             data_test=real_dir / (dataset + '.txt'),
+             label_file=real_dir / (dataset + '_score.txt'))

     # Evaluate on synthetic datasets
     synth_dir = Path(datasets_dir) / 'synthetic'
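
Note on the predict.py fix: `/` binds tighter than `+`, so `real_dir / dataset + '.txt'` parses as `(real_dir / dataset) + '.txt'`, and `pathlib.Path` objects do not support `+`, which raises a TypeError as soon as the line runs. Concatenating the strings first resolves it:

from pathlib import Path

real_dir = Path('datasets') / 'real'
dataset = 'dblp'

# Broken: (real_dir / dataset) + '.txt'
# TypeError: unsupported operand type(s) for +: 'PosixPath' and 'str'

data_test = real_dir / (dataset + '.txt')  # parentheses force string concatenation
print(data_test)                           # datasets/real/dblp.txt
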
16 changes: 5 additions & 11 deletions train.py
@@ -1,7 +1,6 @@
 from pathlib import Path

 import torch
-from knockknock import telegram_sender
 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger, WandbLogger
@@ -13,7 +12,7 @@

 # Fix the seed for reproducibility
 fix_random_seed(42)
-experiment = ExperimentSetup(name='prelu_var_data_discrete_triangle', create_latest=True, long_description="""
+experiment = ExperimentSetup(name='vanilla_abcde', create_latest=True, long_description="""
 Use PReLU instead of ReLU
 Use Adam optimizer with big learning rate
 Try to have variable number of edges in the generated graphs
@@ -26,33 +25,28 @@
 torch.multiprocessing.set_sharing_strategy('file_system')


-def fit(t: Trainer):
-    t.fit(model, datamodule=data)
-    return t.callback_metrics
-
-
 if __name__ == '__main__':
     loggers = [
         CSVLogger(experiment.log_dir, name='history'),
         TensorBoardLogger(experiment.log_dir, name=experiment.name, default_hp_metric=False),
         WandbLogger(name=experiment.name, save_dir=experiment.log_dir, project='abcde', save_code=True, notes=experiment.long_description),
-        # AimLogger(experiment=experiment.name),
     ]
     # Previous best: nb_gcn_cycles=(4, 4, 6, 6, 8), conv_sizes=(64, 64, 32, 32, 16), drops=(0, 0, 0, 0, 0)
     model = ABCDE(nb_gcn_cycles=(4, 4, 6, 6, 8, 8),
                   conv_sizes=(48, 48, 32, 32, 24, 24),
                   drops=(0.3, 0.3, 0.2, 0.2, 0.1, 0.1),
                   lr_reduce_patience=2, dropout=0.1)
     data = GraphDataModule(min_nodes=4000, max_nodes=5000, nb_train_graphs=160, nb_valid_graphs=240,
                            batch_size=16, graph_type='powerlaw', repeats=8, regenerate_epoch_interval=10,
-                           cache_dir=Path('datasets') / 'cache_var_randp_1_7')
+                           cache_dir=Path('datasets') / 'cache')
     trainer = Trainer(logger=loggers, gradient_clip_val=0.3,
                       gpus=-1 if torch.cuda.is_available() else None, auto_select_gpus=True,
                       max_epochs=50, terminate_on_nan=True, enable_pl_optimizer=True,
                       reload_dataloaders_every_epoch=True,
                       callbacks=[
                           EarlyStopping(monitor='val_kendal', patience=6, verbose=True, mode='max'),
-                          ModelCheckpoint(dirpath=experiment.model_save_path, filename='drop-{epoch:02d}-{val_kendal:.2f}', monitor='val_kendal', save_top_k=5, verbose=True, mode='max'),
+                          ModelCheckpoint(dirpath=experiment.model_save_path, filename='model-{epoch:02d}-{val_kendal:.2f}', monitor='val_kendal', save_top_k=5, verbose=True, mode='max'),
                           LearningRateMonitor(logging_interval='epoch'),
                       ])
-    fit(trainer)
+    trainer.fit(model, datamodule=data)
+    print(trainer.callback_metrics)
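
Note on the train.py cleanup: the removed `fit` wrapper returned `callback_metrics`, presumably so that knockknock's `telegram_sender` (whose import is dropped above) could report them after training. With the notifier gone, the script calls `trainer.fit` directly and prints `trainer.callback_metrics`, the dictionary of metric name to tensor that PyTorch Lightning fills with the most recently logged values. A sketch of pulling one metric out afterwards (assuming the model logs `val_kendal`, as the callbacks above monitor):

trainer.fit(model, datamodule=data)

# callback_metrics maps logged metric names to scalar tensors
val_kendal = trainer.callback_metrics['val_kendal'].item()
print(f'Final validation Kendall tau: {val_kendal:.4f}')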
