Skip to content

Commit

Permalink
[SENTINEL] 1.0 release with GDA support (#276)
Browse files Browse the repository at this point in the history
* updated dockerfile

* no exact version pinning for metadata

* fix observer mode

* use public repo for metadata

* report app version in log

* bump version to 1.0

* simplified docker config

* move SENTINEL_BALANCE_THRESHOLD to config

* updated readme and example env

* fix toga script

* change default DB path

* GDA Snapshots  (#277)


* modify to run as cron

* get rpc urls from .env

* set only snapshot url variable

* update deployment

* remove log

---------

Co-authored-by: didi <[email protected]>
Co-authored-by: Axe <[email protected]>
  • Loading branch information
3 people authored Feb 21, 2024
1 parent 09b8804 commit c2add4f
Show file tree
Hide file tree
Showing 25 changed files with 754 additions and 317 deletions.
13 changes: 8 additions & 5 deletions .env-example → .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -120,16 +120,19 @@ HTTP_RPC_NODE=
## When running with Docker, this will affect the host port binding, not the binding inside the container.
#METRICS_PORT=9100

# Let the sentinel instance periodically report a basic metrics to a remote server.
# Set this to false in order to disable it.
## Let the sentinel instance periodically report basic metrics to a remote server.
## Set this to false in order to disable it.
#TELEMETRY=true

# Default telemetry server instance provided by Superfluid
## Default telemetry server instance provided by Superfluid
#TELEMETRY_URL=https://sentinel-telemetry.x.superfluid.dev

# Reporting interval, defaults to 12 hours
## Reporting interval, defaults to 12 hours
#TELEMETRY_INTERVAL=43200

## Allows to set a custom instance name, included in the data sent to the telemetry server.
#INSTANCE_NAME=Sentinel

## If set, you get notified about key events like process (re)starts, configuration changes and error conditions
## to the Slack channel the hook belongs to.
#SLACK_WEBHOOK_URL=
Expand All @@ -147,7 +150,7 @@ HTTP_RPC_NODE=

## Location of the sqlite database. The file (and non-existing directories in the path) will be created if not existing.
## Note: this is ignored (overridden) when running with Docker.
#DB_PATH=db.sqlite
#DB_PATH=data/db.sqlite


## --- DOCKER PARAMETERS ---
Expand Down
4 changes: 3 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ name: Dev CI/CD
on:
push:
branches:
- '*'
- 'master'
pull_request:
branches: [master]

jobs:
build:
Expand Down
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ datadir/
snapshots/
coverage
typechain
networks
*error.log

# Hardhat files
cache
Expand All @@ -13,7 +15,7 @@ artifacts
database.sqlite
.env
.env*
!.env-example
!.env.example
.DS_Store
*.sqlite
.npmrc
Expand Down
35 changes: 21 additions & 14 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,27 +1,34 @@
# syntax = docker/dockerfile:1.3

# Always add commit hash for reproducibility
FROM node:18-alpine@sha256:3482a20c97e401b56ac50ba8920cc7b5b2022bfc6aa7d4e4c231755770cf892f

# Enable prod optimizations
# Set environment variables
ENV NODE_ENV=production

# Create app directory
WORKDIR /app
RUN apk add --update --no-cache g++ make python3 && \
ln -sf python3 /usr/bin/python && \
apk add --update --no-cache yarn

COPY ["package.json", "yarn.lock", "./"]
RUN yarn install --frozen-lockfile --production
COPY . /app
# Install dependencies
RUN apk add --update --no-cache \
yarn \
tini

# Copy package.json and yarn.lock
COPY package.json yarn.lock ./

# Install app dependencies
RUN yarn install --production

# Copy the rest of the application
COPY . ./

# make sure we can write the data directory
RUN chown node:node data
RUN chown -R node:node /app/data \
&& chmod -R 755 /app/data

# Add a simple init system so that Node would respect process signals
RUN apk add --no-cache tini
# Use tini as the entrypoint
ENTRYPOINT ["/sbin/tini", "--"]

# Don't run as root
USER node
CMD ["node", "main.js" ]

# Start the application
CMD ["node", "main.js"]
223 changes: 122 additions & 101 deletions README.md

Large diffs are not rendered by default.

20 changes: 4 additions & 16 deletions docker-compose-with-monitoring.yml
Original file line number Diff line number Diff line change
@@ -1,19 +1,16 @@
# Starts the Sentinel service and connected monitoring services: Prometheus and Grafana.

version: '3'
networks:
monitoring:
driver: bridge

services:
# the sentinel image is built from source
sentinel:
image: superfluidfinance/superfluid-sentinel
build: .
restart: unless-stopped
env_file: .env
environment:
- NODE_ENV=production
- DB_PATH=data/db.sqlite
# hardcode the port inside the container
- METRICS_PORT=9100
ports:
Expand All @@ -22,8 +19,6 @@ services:
- 9100
volumes:
- data:/app/data
networks:
- monitoring
deploy:
resources:
limits:
Expand All @@ -33,19 +28,15 @@ services:
memory: 50M
prometheus:
image: prom/prometheus:v2.36.1
container_name: prometheus
volumes:
- ./prometheus:/etc/prometheus
- prometheus_data:/prometheus
ports:
- ${PROMETHEUS_PORT:-9090}:9090
expose:
- ${PROMETHEUS_PORT:-9090}
networks:
- monitoring
grafana:
image: grafana/grafana:8.2.6
container_name: grafana
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning
Expand All @@ -55,11 +46,8 @@ services:
- ${GRAFANA_PORT:-3000}:3000
expose:
- ${GRAFANA_PORT:-3000}
networks:
- monitoring

volumes:
prometheus_data: { }
grafana_data: { }
data: { }

prometheus_data:
grafana_data:
data:
11 changes: 2 additions & 9 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -1,28 +1,21 @@
# Minimal version which starts the sentinel service only, without additional monitoring services.
# Basic docker-compose file for running a sentinel.
# This is ideal for resource constrained environments or for use with custom monitoring setups.

version: '3'
services:
sentinel:
image: superfluidfinance/superfluid-sentinel:${SENTINEL_VERSION:-latest}
build: .
restart: unless-stopped
env_file: .env
environment:
- NODE_ENV=production
- DB_PATH=data/db.sqlite
# hardcode the port inside the container
- METRICS_PORT=9100
ports:
- ${METRICS_PORT:-9100}:9100
volumes:
- data:/app/data
deploy:
resources:
limits:
cpus: '0.50'
memory: 300M
reservations:
memory: 50M

volumes:
data:
9 changes: 4 additions & 5 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "superfluid-sentinel",
"version": "0.11.0",
"version": "1.0.0",
"description": "Superfluid Sentinel",
"main": "main.js",
"scripts": {
Expand All @@ -27,7 +27,7 @@
"dependencies": {
"@decentral.ee/web3-helpers": "^0.5.3",
"@slack/webhook": "^6.1.0",
"@superfluid-finance/ethereum-contracts": "1.9.1-dev.083b723.0",
"@superfluid-finance/ethereum-contracts": "1.9.0",
"@superfluid-finance/metadata": "^1.1.27",
"async": "^3.2.4",
"axios": "^1.4.0",
Expand All @@ -42,15 +42,14 @@
"node-telegram-bot-api": "^0.61.0",
"prom-client": "^14.2.0",
"sequelize": "^6.32.1",

"sqlite3": "^5.1.6",
"web3": "4.2.0",
"winston": "^3.10.0"
},
"devDependencies": {
"@nomicfoundation/hardhat-toolbox": "^3.0.0",
"@nomicfoundation/hardhat-chai-matchers": "^2.0.0",
"@nomicfoundation/hardhat-ethers": "^3.0.0",
"@nomicfoundation/hardhat-toolbox": "^3.0.0",
"@nomicfoundation/hardhat-verify": "^1.0.0",
"@truffle/contract": "^4.6.26",
"@typechain/ethers-v6": "^0.4.0",
Expand All @@ -59,7 +58,7 @@
"@types/mocha": ">=9.1.0",
"chai": "^4.2.0",
"ganache": "^7.9.0",
"hardhat": "^2.17.0",
"hardhat": "^2.20.1",
"hardhat-gas-reporter": "^1.0.8",
"husky": "^8.0.3",
"mocha": "^10.2.0",
Expand Down
2 changes: 1 addition & 1 deletion scripts/buildSnapshot.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ const EventModel = require("./../src/models/EventModel");
const Bootstrap = require("./../src/boot/bootstrap");
const LoadEvents = require("./../src/boot/loadEvents");
const DB = require("./../src/database/db");
const Repository = require("./../src/database/repository");
const Repository = require("./../src/database/businessRepository");
const Timer = require("./../src/utils/timer");
const metadata = require("@superfluid-finance/metadata/networks.json");
const {QueryTypes} = require("sequelize");
Expand Down
54 changes: 54 additions & 0 deletions scripts/generateManifest.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
const { writeFileSync, readFileSync } = require("fs");

function cleanLogData(path) {
const newCIDs = new Map();
try {
readFileSync(path, "utf8").split(/\r?\n/).forEach(line => {
const splitLine = line.split(",");
const fileName = splitLine[0];
const filtered = fileName.match("_(.*)_")
if(filtered) {
newCIDs.set(filtered[1], splitLine[1]);
}
});
return newCIDs;
} catch (err) {
console.log(err);
}
}

(() => {
try {
const myArgs = process.argv.slice(2);
const ipfsLog = myArgs[0];
const outputFile = myArgs[1];

console.log(`ipfs log file: ${ipfsLog}, output file: ${outputFile}`);

if(!ipfsLog) {
throw new Error("No IPFS log")
}
if(!outputFile) {
throw new Error("No output file")
}

const newCIDs = cleanLogData(ipfsLog);
/*if(newCIDs.size !== 10) {
throw new Error("IPFS log not complety")
}*/

// Read manifest data from local file
const manifestJson = JSON.parse(readFileSync('manifest.json', 'utf8'));

// Update manifest in memory
for (const [key, value] of newCIDs) {
manifestJson.networks[key].cid = value;
}

// Write updated manifest to output file
writeFileSync(outputFile, JSON.stringify(manifestJson, null, 2));

} catch (err) {
console.error(err)
}
})();
73 changes: 73 additions & 0 deletions scripts/manageSnapshots.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
#!/bin/bash

# Generates, uploads (to IPFS) and cleans sentinel DB snapshots.
# Intended to be invoked periodically, e.g. from a cron job.
#
# Environment variables:
#   SNAPSHOT_RPC_URLS  comma separated list of RPC hostnames;
#                      "https://" is prepended when building snapshots
#   IPFS_API           address of the IPFS API used for uploading

set -xe

# Variables
rpc_urls="${SNAPSHOT_RPC_URLS}"
ipfs_api="${IPFS_API}"
# snapshots are written to and cleaned from this directory (relative to cwd)
snapshot_dir="snapshots"

# Builds one snapshot per RPC URL listed in SNAPSHOT_RPC_URLS.
generate_snapshot() {
    echo "Generating new snapshots..."
    yarn install

    # mkdir -p replaces the previous explicit existence check
    mkdir -p "$snapshot_dir"

    # Get list of RPC URLs from environment variable
    IFS=',' read -r -a rpc_array <<< "$rpc_urls"
    for rpc in "${rpc_array[@]}"; do
        echo "${rpc}"
        [ -n "$rpc" ] && node ./scripts/buildSnapshot.js "https://$rpc"
    done

    echo "Generating done"
}

# Adds all generated snapshots to IPFS, rebuilds the manifest with the new
# CIDs and republishes it under the "sentinel-manifest" IPNS key.
upload_snapshot() {
    echo "Uploading snapshots..."
    # the log file lives in logs/, which may not exist on a fresh checkout
    mkdir -p logs
    ipfs_logfile="logs/ipfs_$(date '+%Y-%m-%d').txt"
    rm -f -- "$ipfs_logfile"
    for file in ./"$snapshot_dir"/*.sqlite.gz; do
        ipfs_hash=$(ipfs --api "$ipfs_api" add -q "$file")
        echo "$file,$ipfs_hash" >> "$ipfs_logfile"
    done
    node ./scripts/generateManifest.js "$ipfs_logfile" manifest.json
    ipfs_hash=$(ipfs --api "$ipfs_api" add -q manifest.json)
    echo "manifest.json,$ipfs_hash" >> "$ipfs_logfile"
    # updating the manifest ipns link
    ipfs --api "$ipfs_api" name publish --key=sentinel-manifest "$ipfs_hash"
    echo "Uploading snapshots done"
}

# Removes previously generated snapshot archives.
clean_snapshots() {
    echo "Cleaning snapshot folder..."
    # NOTE(review): was "$HOME/snapshots", but generate_snapshot writes into
    # ./snapshots - clean the same directory the snapshots are written to.
    rm -f -- "$snapshot_dir"/*.gz
    echo "Cleaning done"
}

# Usage
usage() {
    # dropped the advertised -p option: it was never handled below
    echo "Usage: $0 [-g] [-u] [-c]"
    echo "Options:"
    echo "  -g  Generate snapshots"
    echo "  -u  Upload snapshots"
    echo "  -c  Clean snapshots"
    exit 1
}

# Command line options
while getopts "guc" opt; do
    case $opt in
        g) generate_snapshot ;;
        u) upload_snapshot ;;
        c) clean_snapshots ;;
        *) usage ;;
    esac
done

# If no options are provided, show usage
if [[ $# -eq 0 ]]; then
    usage
fi
Loading

0 comments on commit c2add4f

Please sign in to comment.