# NixOS module: every.channel OP Stack (Sepolia) testnet services.
{ lib, config, pkgs, ... }:

let
  # Shorthand for this module's option set.
  cfg = config.services.every-channel.op-stack;

  # Repository root, two directories above this module file.
  scriptRoot = ../..;

  # Bootstrap helper invoked by the oneshot systemd service below.
  # NOTE(review): its contract (the EVERY_CHANNEL_* env vars and the
  # per-service workspace layout under rootDir) is inferred from this
  # module's usage — confirm against scripts/op-stack/setup-rollup.sh.
  bootstrapScript = "${scriptRoot}/scripts/op-stack/setup-rollup.sh";

  # Helper that fetches the pinned op-deployer release binary.
  downloadScript = "${scriptRoot}/scripts/op-stack/download-op-deployer.sh";

  # Uniform name prefix for every OP Stack OCI container.
  containerName = name: "every-channel-op-${name}";
in
|
|
{
|
|
options.services.every-channel.op-stack = {
  enable = lib.mkEnableOption "every.channel OP Stack Sepolia testnet services";

  # ---- Host state and credentials ---------------------------------------

  rootDir = lib.mkOption {
    type = lib.types.str;
    default = "/var/lib/every-channel/op-stack";
    description = "Persistent root directory for OP Stack bootstrap outputs and container state.";
  };

  privateKeyFile = lib.mkOption {
    type = lib.types.nullOr lib.types.str;
    default = null;
    description = "File containing the Sepolia private key used for op-deployer and operator services.";
  };

  # ---- L1 endpoints ------------------------------------------------------

  l1RpcUrl = lib.mkOption {
    type = lib.types.str;
    default = "https://ethereum-sepolia-rpc.publicnode.com";
    description = "Sepolia L1 RPC URL.";
  };

  l1BeaconUrl = lib.mkOption {
    type = lib.types.str;
    default = "https://ethereum-sepolia-beacon-api.publicnode.com";
    description = "Sepolia L1 beacon API URL.";
  };

  # ---- L2 chain identity and P2P -----------------------------------------

  chainId = lib.mkOption {
    type = lib.types.ints.positive;
    default = 245245;
    description = "L2 chain ID for the every.channel OP Stack testnet.";
  };

  p2pAdvertiseIp = lib.mkOption {
    type = lib.types.str;
    default = "127.0.0.1";
    description = "Public IP advertised by op-node for P2P.";
  };

  p2pListenPort = lib.mkOption {
    type = lib.types.port;
    default = 9222;
    description = "P2P listen port for op-node.";
  };

  # ---- Loopback service ports --------------------------------------------
  # Defaults sit in the 285xx range to stay clear of a co-hosted L1 node
  # (e.g. geth's standard 8545/8546/8551/30303).

  ports = {
    l2Http = lib.mkOption {
      type = lib.types.port;
      default = 28545;
      description = "Local op-geth HTTP JSON-RPC port.";
    };

    l2Ws = lib.mkOption {
      type = lib.types.port;
      default = 28546;
      description = "Local op-geth WebSocket JSON-RPC port.";
    };

    l2Auth = lib.mkOption {
      type = lib.types.port;
      default = 28551;
      description = "Local op-geth Engine API port.";
    };

    l2P2p = lib.mkOption {
      type = lib.types.port;
      default = 28549;
      description = "Local op-geth P2P port, kept away from the host Ethereum node's 30303.";
    };

    rollupRpc = lib.mkOption {
      type = lib.types.port;
      default = 28547;
      description = "Local op-node rollup RPC port.";
    };

    batcherRpc = lib.mkOption {
      type = lib.types.port;
      default = 28548;
      description = "Local op-batcher admin RPC port.";
    };

    proposerRpc = lib.mkOption {
      type = lib.types.port;
      default = 28560;
      description = "Local op-proposer admin RPC port.";
    };
  };

  openFirewall = lib.mkOption {
    type = lib.types.bool;
    default = true;
    description = "Open the op-node P2P TCP/UDP port.";
  };

  # ---- Optional fault-proof services -------------------------------------

  disputeMonEnable = lib.mkOption {
    type = lib.types.bool;
    default = true;
    description = "Run op-dispute-mon alongside the core OP Stack services.";
  };

  challengerEnable = lib.mkOption {
    type = lib.types.bool;
    default = true;
    description = "Run op-challenger for the rollup.";
  };

  challengerPrestateFile = lib.mkOption {
    type = lib.types.nullOr lib.types.str;
    default = null;
    description = "Path to the Cannon absolute prestate .bin.gz file used by op-challenger.";
  };

  # ---- Pinned tool and image versions -------------------------------------

  opDeployerTag = lib.mkOption {
    type = lib.types.str;
    default = "op-deployer/v0.6.0-rc.3";
    description = "Pinned op-deployer release tag used for bootstrap.";
  };

  images = {
    opNode = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.16.6";
      description = "Container image for op-node.";
    };
    opGeth = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101702.0-rc.1";
      description = "Container image for op-geth.";
    };
    batcher = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:v1.14.0";
      description = "Container image for op-batcher.";
    };
    proposer = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:v1.10.0";
      description = "Container image for op-proposer.";
    };
    challenger = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.5.1";
      description = "Container image for op-challenger.";
    };
    disputeMon = lib.mkOption {
      type = lib.types.str;
      default = "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-dispute-mon:v1.4.2-rc.1";
      description = "Container image for op-dispute-mon.";
    };
  };
};
|
|
|
|
config = lib.mkIf cfg.enable {
|
|
assertions = [
  # The bootstrap service and operator containers read the key from this
  # file, so an enabled stack without it can never come up.
  {
    assertion = cfg.privateKeyFile != null;
    message = "services.every-channel.op-stack.privateKeyFile must be set when the OP Stack is enabled";
  }
  # Fail at evaluation time, not at service start, if the helper scripts
  # are missing from the repository checkout.
  {
    assertion = builtins.pathExists bootstrapScript;
    message = "missing bootstrap script at scripts/op-stack/setup-rollup.sh";
  }
  {
    assertion = builtins.pathExists downloadScript;
    message = "missing download helper at scripts/op-stack/download-op-deployer.sh";
  }
  # The challenger container passes $CANNON_PRESTATE to op-challenger, so a
  # prestate file is mandatory whenever the challenger is enabled.
  {
    assertion = (!cfg.challengerEnable) || cfg.challengerPrestateFile != null;
    message = "services.every-channel.op-stack.challengerPrestateFile must be set when challengerEnable = true";
  }
  # This module only supports running dispute-mon next to a challenger.
  {
    assertion = (!cfg.disputeMonEnable) || cfg.challengerEnable;
    message = "services.every-channel.op-stack.disputeMonEnable requires challengerEnable = true";
  }
];
|
|
|
|
# Expose the op-node P2P port on both TCP and UDP when requested.
networking.firewall = lib.mkIf cfg.openFirewall (
  let
    p2pPorts = [ cfg.p2pListenPort ];
  in
  {
    allowedTCPPorts = p2pPorts;
    allowedUDPPorts = p2pPorts;
  }
);
|
|
|
|
# Pre-create the persistent directory tree, root-owned and group-readable.
# The root directory is listed first and "challenger" before
# "challenger/data" so parents always precede their children.
systemd.tmpfiles.rules =
  map (dir: "d ${dir} 0750 root root - -") (
    [ cfg.rootDir ]
    ++ map (sub: "${cfg.rootDir}/${sub}") [
      "bin"
      "deployer"
      "sequencer"
      "batcher"
      "proposer"
      "challenger"
      "challenger/data"
      "dispute-mon"
      "op-geth-data"
    ]
  );
|
|
|
|
virtualisation.oci-containers.containers = {
|
|
# op-geth execution client. Initializes the chain from the bootstrap's
# genesis.json on first start, then serves HTTP/WS/Engine RPC on loopback.
"${containerName "geth"}" = {
  image = cfg.images.opGeth;
  autoStart = true;
  volumes = [
    "${cfg.rootDir}/sequencer:/workspace"
    "${cfg.rootDir}/op-geth-data:/workspace/op-geth-data"
  ];
  # Host networking keeps all RPC endpoints on 127.0.0.1 without port maps.
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  cmd = [
    "-lc"
    ''
      set -e
      # One-time genesis init: run only while the chaindata dir is absent.
      if [ ! -d /workspace/op-geth-data/geth/chaindata ]; then
        geth init --datadir=/workspace/op-geth-data --state.scheme=hash /workspace/genesis.json
      fi
      # Wildcard flag values are single-quoted so the shell can never
      # glob-expand them against files in the working directory.
      exec geth --datadir=/workspace/op-geth-data --http --http.addr=127.0.0.1 --http.port=${toString cfg.ports.l2Http} --ws --ws.addr=127.0.0.1 --ws.port=${toString cfg.ports.l2Ws} --authrpc.addr=127.0.0.1 --authrpc.port=${toString cfg.ports.l2Auth} --authrpc.jwtsecret=/workspace/jwt.txt --port=${toString cfg.ports.l2P2p} --syncmode=full --gcmode=archive --rollup.disabletxpoolgossip=true --http.vhosts='*' --http.corsdomain='*' --http.api=eth,net,web3,debug,txpool,admin --ws.origins='*' --ws.api=eth,net,web3,debug,txpool,admin --authrpc.vhosts='*'
    ''
  ];
};
|
|
|
|
# op-node consensus client, running as the (sole) sequencer.
"${containerName "node"}" = {
  image = cfg.images.opNode;
  autoStart = true;
  # Shares the sequencer workspace (rollup.json, jwt.txt) with op-geth.
  volumes = [
    "${cfg.rootDir}/sequencer:/workspace"
    "${cfg.rootDir}/op-geth-data:/workspace/op-geth-data"
  ];
  # Provides L1_RPC_URL, L1_BEACON_URL, P2P_ADVERTISE_IP and PRIVATE_KEY
  # referenced in the command below.
  # NOTE(review): variable set inferred from usage — confirm against the
  # bootstrap script that writes this file.
  environmentFiles = [ "${cfg.rootDir}/sequencer/.env" ];
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  # P2P listens on all interfaces; the rollup/admin RPC stays on loopback.
  cmd = [
    "-lc"
    ''
      exec op-node \
        --l1="$L1_RPC_URL" \
        --l1.beacon="$L1_BEACON_URL" \
        --l2=http://127.0.0.1:${toString cfg.ports.l2Auth} \
        --l2.jwt-secret=/workspace/jwt.txt \
        --rollup.config=/workspace/rollup.json \
        --sequencer.enabled=true \
        --sequencer.stopped=false \
        --sequencer.max-safe-lag=3600 \
        --verifier.l1-confs=4 \
        --p2p.listen.ip=0.0.0.0 \
        --p2p.listen.tcp=${toString cfg.p2pListenPort} \
        --p2p.listen.udp=${toString cfg.p2pListenPort} \
        --p2p.advertise.ip="$P2P_ADVERTISE_IP" \
        --p2p.advertise.tcp=${toString cfg.p2pListenPort} \
        --p2p.advertise.udp=${toString cfg.p2pListenPort} \
        --p2p.sequencer.key="$PRIVATE_KEY" \
        --rpc.addr=127.0.0.1 \
        --rpc.port=${toString cfg.ports.rollupRpc} \
        --rpc.enable-admin \
        --log.level=info \
        --log.format=json
    ''
  ];
};
|
|
|
|
# op-batcher: submits L2 batches to L1 (as calldata, per the flag below).
"${containerName "batcher"}" = {
  image = cfg.images.batcher;
  autoStart = true;
  volumes = [ "${cfg.rootDir}/batcher:/workspace" ];
  # Provides L1_RPC_URL, L2_RPC_URL, ROLLUP_RPC_URL and PRIVATE_KEY
  # referenced in the command below.
  # NOTE(review): variable set inferred from usage — confirm against the
  # bootstrap script that writes this file.
  environmentFiles = [ "${cfg.rootDir}/batcher/.env" ];
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  # Admin RPC is loopback-only on the configured batcher port.
  cmd = [
    "-lc"
    ''
      exec op-batcher \
        --l1-eth-rpc="$L1_RPC_URL" \
        --l2-eth-rpc="$L2_RPC_URL" \
        --rollup-rpc="$ROLLUP_RPC_URL" \
        --private-key="$PRIVATE_KEY" \
        --rpc.addr=127.0.0.1 \
        --rpc.port=${toString cfg.ports.batcherRpc} \
        --rpc.enable-admin \
        --max-channel-duration=1 \
        --data-availability-type=calldata \
        --resubmission-timeout=30s \
        --log.level=info \
        --log.format=json
    ''
  ];
};
|
|
|
|
# op-proposer: posts output proposals via the dispute game factory.
"${containerName "proposer"}" = {
  image = cfg.images.proposer;
  autoStart = true;
  volumes = [ "${cfg.rootDir}/proposer:/workspace" ];
  # Provides ROLLUP_RPC_URL, L1_RPC_URL, PRIVATE_KEY, GAME_FACTORY_ADDRESS
  # and PROPOSAL_INTERVAL referenced in the command below.
  # NOTE(review): variable set inferred from usage — confirm against the
  # bootstrap script that writes this file.
  environmentFiles = [ "${cfg.rootDir}/proposer/.env" ];
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  cmd = [
    "-lc"
    ''
      exec op-proposer \
        --rpc.port=${toString cfg.ports.proposerRpc} \
        --rollup-rpc="$ROLLUP_RPC_URL" \
        --l1-eth-rpc="$L1_RPC_URL" \
        --private-key="$PRIVATE_KEY" \
        --game-factory-address="$GAME_FACTORY_ADDRESS" \
        --proposal-interval="$PROPOSAL_INTERVAL" \
        --allow-non-finalized=true \
        --wait-node-sync=true \
        --log.level=info \
        --log.format=json
    ''
  ];
};
|
|
} // lib.optionalAttrs cfg.challengerEnable {
|
|
# op-challenger in run-trace mode with the Cannon trace type. Requires the
# L2 genesis, rollup config, and an absolute prestate (see the module
# assertion tying challengerEnable to challengerPrestateFile).
"${containerName "challenger"}" = {
  image = cfg.images.challenger;
  autoStart = true;
  volumes = [ "${cfg.rootDir}/challenger:/workspace" ];
  # Provides L1_RPC_URL, L1_BEACON_URL, PRIVATE_KEY, GAME_FACTORY_ADDRESS,
  # CANNON_PRESTATE, L2_RPC_URL and ROLLUP_RPC_URL referenced below.
  # NOTE(review): variable set inferred from usage — confirm against the
  # bootstrap script that writes this file.
  environmentFiles = [ "${cfg.rootDir}/challenger/.env" ];
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  cmd = [
    "-lc"
    ''
      exec op-challenger run-trace \
        --trace-type=cannon \
        --l1-eth-rpc="$L1_RPC_URL" \
        --l1-beacon="$L1_BEACON_URL" \
        --private-key="$PRIVATE_KEY" \
        --game-factory-address="$GAME_FACTORY_ADDRESS" \
        --cannon-l2-genesis=/workspace/genesis.json \
        --cannon-rollup-config=/workspace/rollup.json \
        --cannon-prestate="$CANNON_PRESTATE" \
        --l2-eth-rpc="$L2_RPC_URL" \
        --rollup-rpc="$ROLLUP_RPC_URL" \
        --datadir=/workspace/data \
        --log.level=info \
        --log.format=json
    ''
  ];
};
|
|
} // lib.optionalAttrs cfg.disputeMonEnable {
|
|
# op-dispute-mon: monitors dispute games. Launched with no CLI flags, so
# its configuration presumably comes entirely from the env file below —
# NOTE(review): confirm against the bootstrap script that writes it.
"${containerName "dispute-mon"}" = {
  image = cfg.images.disputeMon;
  autoStart = true;
  volumes = [ "${cfg.rootDir}/dispute-mon:/workspace" ];
  environmentFiles = [ "${cfg.rootDir}/dispute-mon/.env" ];
  extraOptions = [ "--network=host" ];
  entrypoint = "/bin/sh";
  cmd = [
    "-lc"
    ''
      exec op-dispute-mon
    ''
  ];
};
|
|
};
|
|
|
|
systemd.services =
  let
    # oci-containers derives unit names from the configured backend
    # ("podman-<name>" or "docker-<name>"). Follow that option instead of
    # hard-coding "podman-" so the ordering overrides still attach when
    # virtualisation.oci-containers.backend = "docker".
    backendUnit = name: "${config.virtualisation.oci-containers.backend}-${containerName name}";
    bootstrapUnit = "every-channel-op-stack-bootstrap.service";
    # Shared ordering for operator services that follow op-node.
    afterNode = {
      after = [ "${backendUnit "node"}.service" ];
      wants = [ "${backendUnit "node"}.service" ];
    };
  in
  {
    # Oneshot bootstrap: downloads op-deployer, deploys/loads the rollup,
    # and populates the per-service workspaces under cfg.rootDir.
    every-channel-op-stack-bootstrap = {
      description = "every.channel OP Stack bootstrap";
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];
      # Tools the bootstrap script shells out to.
      path = with pkgs; [
        bash
        coreutils
        curl
        gnutar
        gzip
        jq
        openssl
        foundry
        python3
      ];
      serviceConfig = {
        Type = "oneshot";
        # Stay "active" after exiting so the container units' Requires=
        # dependency on this service holds.
        RemainAfterExit = true;
      };
      script = ''
        set -euo pipefail
        export EVERY_CHANNEL_OP_STACK_ROOT=${lib.escapeShellArg cfg.rootDir}
        # Guard against null so a misconfiguration surfaces as the module
        # assertion, not as a string-coercion error while building this
        # unit (same pattern as the prestate export below).
        export EVERY_CHANNEL_OP_STACK_PRIVATE_KEY_FILE=${lib.escapeShellArg (if cfg.privateKeyFile == null then "" else cfg.privateKeyFile)}
        export EVERY_CHANNEL_OP_STACK_L1_RPC_URL=${lib.escapeShellArg cfg.l1RpcUrl}
        export EVERY_CHANNEL_OP_STACK_L1_BEACON_URL=${lib.escapeShellArg cfg.l1BeaconUrl}
        export EVERY_CHANNEL_OP_STACK_CHAIN_ID=${toString cfg.chainId}
        export EVERY_CHANNEL_OP_STACK_P2P_ADVERTISE_IP=${lib.escapeShellArg cfg.p2pAdvertiseIp}
        export EVERY_CHANNEL_OP_STACK_L2_RPC_URL=http://127.0.0.1:${toString cfg.ports.l2Http}
        export EVERY_CHANNEL_OP_STACK_ROLLUP_RPC_URL=http://127.0.0.1:${toString cfg.ports.rollupRpc}
        export EVERY_CHANNEL_OP_DEPLOYER_BIN=${lib.escapeShellArg "${cfg.rootDir}/bin/op-deployer"}
        export EVERY_CHANNEL_OP_DEPLOYER_TAG=${lib.escapeShellArg cfg.opDeployerTag}
        export EVERY_CHANNEL_OP_DEPLOYER_DOWNLOAD_SCRIPT=${lib.escapeShellArg downloadScript}
        export EVERY_CHANNEL_OP_STACK_CHALLENGER_PRESTATE_FILE=${lib.escapeShellArg (if cfg.challengerPrestateFile == null then "" else cfg.challengerPrestateFile)}
        ${lib.escapeShellArg bootstrapScript}
      '';
    };

    # op-geth must not start before bootstrap has produced its workspace
    # (genesis.json, jwt.txt); Requires= stops it if bootstrap failed.
    "${backendUnit "geth"}" = {
      after = [ bootstrapUnit ];
      wants = [ bootstrapUnit ];
      requires = [ bootstrapUnit ];
    };

    # op-node follows both bootstrap and op-geth (it dials the Engine API).
    "${backendUnit "node"}" = {
      after = [ bootstrapUnit "${backendUnit "geth"}.service" ];
      wants = [ bootstrapUnit "${backendUnit "geth"}.service" ];
      requires = [ bootstrapUnit ];
    };

    "${backendUnit "batcher"}" = afterNode;
    "${backendUnit "proposer"}" = afterNode;
  }
  // lib.optionalAttrs cfg.challengerEnable {
    "${backendUnit "challenger"}" = afterNode;
  }
  // lib.optionalAttrs cfg.disputeMonEnable {
    "${backendUnit "dispute-mon"}" = afterNode;
  };
|
|
};
|
|
}
|